nvc0/ir: do constant folding of extbf/insbf
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25 #include "codegen/nv50_ir_build_util.h"
26
27 extern "C" {
28 #include "util/u_math.h"
29 }
30
31 namespace nv50_ir {
32
33 bool
34 Instruction::isNop() const
35 {
36 if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
37 return true;
38 if (terminator || join) // XXX: should terminator imply flow ?
39 return false;
40 if (op == OP_ATOM)
41 return false;
42 if (!fixed && op == OP_NOP)
43 return true;
44
45 if (defExists(0) && def(0).rep()->reg.data.id < 0) {
46 for (int d = 1; defExists(d); ++d)
47 if (def(d).rep()->reg.data.id >= 0)
48 WARN("part of vector result is unused !\n");
49 return true;
50 }
51
52 if (op == OP_MOV || op == OP_UNION) {
53 if (!getDef(0)->equals(getSrc(0)))
54 return false;
55 if (op == OP_UNION)
56 if (!def(0).rep()->equals(getSrc(1)))
57 return false;
58 return true;
59 }
60
61 return false;
62 }
63
64 bool Instruction::isDead() const
65 {
66 if (op == OP_STORE ||
67 op == OP_EXPORT ||
68 op == OP_ATOM ||
69 op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
70 op == OP_WRSV)
71 return false;
72
73 for (int d = 0; defExists(d); ++d)
74 if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
75 return false;
76
77 if (terminator || asFlow())
78 return false;
79 if (fixed)
80 return false;
81
82 return true;
83 }
84
85 // =============================================================================
86
87 class CopyPropagation : public Pass
88 {
89 private:
90 virtual bool visit(BasicBlock *);
91 };
92
93 // Propagate all MOVs forward to make subsequent optimization easier, except if
94 // the sources stem from a phi, in which case we don't want to mess up potential
95 // swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
96 bool
97 CopyPropagation::visit(BasicBlock *bb)
98 {
99 Instruction *mov, *si, *next;
100
101 for (mov = bb->getEntry(); mov; mov = next) {
102 next = mov->next;
103 if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
104 continue;
105 if (mov->getPredicate())
106 continue;
107 if (mov->def(0).getFile() != mov->src(0).getFile())
108 continue;
109 si = mov->getSrc(0)->getInsn();
110 if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
111 // propagate
112 mov->def(0).replace(mov->getSrc(0), false);
113 delete_Instruction(prog, mov);
114 }
115 }
116 return true;
117 }
118
119 // =============================================================================
120
121 class LoadPropagation : public Pass
122 {
123 private:
124 virtual bool visit(BasicBlock *);
125
126 void checkSwapSrc01(Instruction *);
127
128 bool isCSpaceLoad(Instruction *);
129 bool isImmd32Load(Instruction *);
130 bool isAttribOrSharedLoad(Instruction *);
131 };
132
133 bool
134 LoadPropagation::isCSpaceLoad(Instruction *ld)
135 {
136 return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
137 }
138
139 bool
140 LoadPropagation::isImmd32Load(Instruction *ld)
141 {
142 if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
143 return false;
144 return ld->src(0).getFile() == FILE_IMMEDIATE;
145 }
146
147 bool
148 LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
149 {
150 return ld &&
151 (ld->op == OP_VFETCH ||
152 (ld->op == OP_LOAD &&
153 (ld->src(0).getFile() == FILE_SHADER_INPUT ||
154 ld->src(0).getFile() == FILE_MEMORY_SHARED)));
155 }
156
157 void
158 LoadPropagation::checkSwapSrc01(Instruction *insn)
159 {
160 if (!prog->getTarget()->getOpInfo(insn).commutative)
161 if (insn->op != OP_SET && insn->op != OP_SLCT)
162 return;
163 if (insn->src(1).getFile() != FILE_GPR)
164 return;
165
166 Instruction *i0 = insn->getSrc(0)->getInsn();
167 Instruction *i1 = insn->getSrc(1)->getInsn();
168
169 if (isCSpaceLoad(i0)) {
170 if (!isCSpaceLoad(i1))
171 insn->swapSources(0, 1);
172 else
173 return;
174 } else
175 if (isImmd32Load(i0)) {
176 if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
177 insn->swapSources(0, 1);
178 else
179 return;
180 } else
181 if (isAttribOrSharedLoad(i1)) {
182 if (!isAttribOrSharedLoad(i0))
183 insn->swapSources(0, 1);
184 else
185 return;
186 } else {
187 return;
188 }
189
190 if (insn->op == OP_SET)
191 insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
192 else
193 if (insn->op == OP_SLCT)
194 insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
195 }
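
// [editor's note] A small illustration of the swap (register names are
// hypothetical): "set lt $p0 c0[0x10] $r0" becomes "set gt $p0 $r0 c0[0x10]".
// Since a < b and b > a are equivalent, moving the constant-buffer load into
// src(1) -- where insnCanLoad() can fold it -- preserves the result.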
196
197 bool
198 LoadPropagation::visit(BasicBlock *bb)
199 {
200 const Target *targ = prog->getTarget();
201 Instruction *next;
202
203 for (Instruction *i = bb->getEntry(); i; i = next) {
204 next = i->next;
205
206 if (i->op == OP_CALL) // calls have args as sources, they must be in regs
207 continue;
208
209 if (i->srcExists(1))
210 checkSwapSrc01(i);
211
212 for (int s = 0; i->srcExists(s); ++s) {
213 Instruction *ld = i->getSrc(s)->getInsn();
214
215 if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
216 continue;
217 if (!targ->insnCanLoad(i, s, ld))
218 continue;
219
220 // propagate !
221 i->setSrc(s, ld->getSrc(0));
222 if (ld->src(0).isIndirect(0))
223 i->setIndirect(s, 0, ld->getIndirect(0, 0));
224
225 if (ld->getDef(0)->refCount() == 0)
226 delete_Instruction(prog, ld);
227 }
228 }
229 return true;
230 }
231
232 // =============================================================================
233
234 // Evaluate constant expressions.
235 class ConstantFolding : public Pass
236 {
237 public:
238 bool foldAll(Program *);
239
240 private:
241 virtual bool visit(BasicBlock *);
242
243 void expr(Instruction *, ImmediateValue&, ImmediateValue&);
244 void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
245 void opnd(Instruction *, ImmediateValue&, int s);
246
247 void unary(Instruction *, const ImmediateValue&);
248
249 void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);
250
251 // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
252 CmpInstruction *findOriginForTestWithZero(Value *);
253
254 unsigned int foldCount;
255
256 BuildUtil bld;
257 };
258
259 // TODO: remember generated immediates and only revisit these
260 bool
261 ConstantFolding::foldAll(Program *prog)
262 {
263 unsigned int iterCount = 0;
264 do {
265 foldCount = 0;
266 if (!run(prog))
267 return false;
268 } while (foldCount && ++iterCount < 2);
269 return true;
270 }
271
272 bool
273 ConstantFolding::visit(BasicBlock *bb)
274 {
275 Instruction *i, *next;
276
277 for (i = bb->getEntry(); i; i = next) {
278 next = i->next;
279 if (i->op == OP_MOV || i->op == OP_CALL)
280 continue;
281
282 ImmediateValue src0, src1, src2;
283
284 if (i->srcExists(2) &&
285 i->src(0).getImmediate(src0) &&
286 i->src(1).getImmediate(src1) &&
287 i->src(2).getImmediate(src2))
288 expr(i, src0, src1, src2);
289 else
290 if (i->srcExists(1) &&
291 i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
292 expr(i, src0, src1);
293 else
294 if (i->srcExists(0) && i->src(0).getImmediate(src0))
295 opnd(i, src0, 0);
296 else
297 if (i->srcExists(1) && i->src(1).getImmediate(src1))
298 opnd(i, src1, 1);
299 }
300 return true;
301 }
302
303 CmpInstruction *
304 ConstantFolding::findOriginForTestWithZero(Value *value)
305 {
306 if (!value)
307 return NULL;
308 Instruction *insn = value->getInsn();
309
310 while (insn && insn->op != OP_SET) {
311 Instruction *next = NULL;
312 switch (insn->op) {
313 case OP_NEG:
314 case OP_ABS:
315 case OP_CVT:
316 next = insn->getSrc(0)->getInsn();
317 if (!next || insn->sType != next->dType)
318 return NULL;
319 break;
320 case OP_MOV:
321 next = insn->getSrc(0)->getInsn();
322 break;
323 default:
324 return NULL;
325 }
326 insn = next;
327 }
328 return insn ? insn->asCmp() : NULL;
329 }
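
// [editor's note] Illustrative chain this walks back through (TGSI-style
// booleans, registers hypothetical):
//   set f32 $r1 lt $r0 0x0     // 1.0f if true, 0.0f if false
//   cvt s32 $r2 neg f32 $r1    // F2I(NEG(...)) == -1 / 0
// so a later compare of $r2 against zero can be restated in terms of the
// original SET's condition.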
330
331 void
332 Modifier::applyTo(ImmediateValue& imm) const
333 {
334 if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
335 return;
336 switch (imm.reg.type) {
337 case TYPE_F32:
338 if (bits & NV50_IR_MOD_ABS)
339 imm.reg.data.f32 = fabsf(imm.reg.data.f32);
340 if (bits & NV50_IR_MOD_NEG)
341 imm.reg.data.f32 = -imm.reg.data.f32;
342 if (bits & NV50_IR_MOD_SAT) {
343 if (imm.reg.data.f32 < 0.0f)
344 imm.reg.data.f32 = 0.0f;
345 else
346 if (imm.reg.data.f32 > 1.0f)
347 imm.reg.data.f32 = 1.0f;
348 }
349 assert(!(bits & NV50_IR_MOD_NOT));
350 break;
351
352 case TYPE_S8: // NOTE: will be extended
353 case TYPE_S16:
354 case TYPE_S32:
355 case TYPE_U8: // NOTE: treated as signed
356 case TYPE_U16:
357 case TYPE_U32:
358 if (bits & NV50_IR_MOD_ABS)
359 imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
360 imm.reg.data.s32 : -imm.reg.data.s32;
361 if (bits & NV50_IR_MOD_NEG)
362 imm.reg.data.s32 = -imm.reg.data.s32;
363 if (bits & NV50_IR_MOD_NOT)
364 imm.reg.data.s32 = ~imm.reg.data.s32;
365 break;
366
367 case TYPE_F64:
368 if (bits & NV50_IR_MOD_ABS)
369 imm.reg.data.f64 = fabs(imm.reg.data.f64);
370 if (bits & NV50_IR_MOD_NEG)
371 imm.reg.data.f64 = -imm.reg.data.f64;
372 if (bits & NV50_IR_MOD_SAT) {
373 if (imm.reg.data.f64 < 0.0)
374 imm.reg.data.f64 = 0.0;
375 else
376 if (imm.reg.data.f64 > 1.0)
377 imm.reg.data.f64 = 1.0;
378 }
379 assert(!(bits & NV50_IR_MOD_NOT));
380 break;
381
382 default:
383 assert(!"invalid/unhandled type");
384 imm.reg.data.u64 = 0;
385 break;
386 }
387 }
388
389 operation
390 Modifier::getOp() const
391 {
392 switch (bits) {
393 case NV50_IR_MOD_ABS: return OP_ABS;
394 case NV50_IR_MOD_NEG: return OP_NEG;
395 case NV50_IR_MOD_SAT: return OP_SAT;
396 case NV50_IR_MOD_NOT: return OP_NOT;
397 case 0:
398 return OP_MOV;
399 default:
400 return OP_CVT;
401 }
402 }
403
404 void
405 ConstantFolding::expr(Instruction *i,
406 ImmediateValue &imm0, ImmediateValue &imm1)
407 {
408 struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
409 struct Storage res;
410
411 memset(&res.data, 0, sizeof(res.data));
412
413 switch (i->op) {
414 case OP_MAD:
415 case OP_FMA:
416 case OP_MUL:
417 if (i->dnz && i->dType == TYPE_F32) {
418 if (!isfinite(a->data.f32))
419 a->data.f32 = 0.0f;
420 if (!isfinite(b->data.f32))
421 b->data.f32 = 0.0f;
422 }
423 switch (i->dType) {
424 case TYPE_F32: res.data.f32 = a->data.f32 * b->data.f32; break;
425 case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
426 case TYPE_S32:
427 case TYPE_U32: res.data.u32 = a->data.u32 * b->data.u32; break;
428 default:
429 return;
430 }
431 break;
432 case OP_DIV:
433 if (b->data.u32 == 0)
434 break;
435 switch (i->dType) {
436 case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
437 case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
438 case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
439 case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
440 default:
441 return;
442 }
443 break;
444 case OP_ADD:
445 switch (i->dType) {
446 case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
447 case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
448 case TYPE_S32:
449 case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
450 default:
451 return;
452 }
453 break;
454 case OP_POW:
455 switch (i->dType) {
456 case TYPE_F32: res.data.f32 = powf(a->data.f32, b->data.f32); break;
457 case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
458 default:
459 return;
460 }
461 break;
462 case OP_MAX:
463 switch (i->dType) {
464 case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
465 case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
466 case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
467 case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
468 default:
469 return;
470 }
471 break;
472 case OP_MIN:
473 switch (i->dType) {
474 case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
475 case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
476 case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
477 case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
478 default:
479 return;
480 }
481 break;
482 case OP_AND:
483 res.data.u64 = a->data.u64 & b->data.u64;
484 break;
485 case OP_OR:
486 res.data.u64 = a->data.u64 | b->data.u64;
487 break;
488 case OP_XOR:
489 res.data.u64 = a->data.u64 ^ b->data.u64;
490 break;
491 case OP_SHL:
492 res.data.u32 = a->data.u32 << b->data.u32;
493 break;
494 case OP_SHR:
495 switch (i->dType) {
496 case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
497 case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
498 default:
499 return;
500 }
501 break;
502 case OP_SLCT:
503 if (a->data.u32 != b->data.u32)
504 return;
505 res.data.u32 = a->data.u32;
506 break;
507 case OP_EXTBF: {
508 int offset = b->data.u32 & 0xff;
509 int width = (b->data.u32 >> 8) & 0xff;
510 int rshift = offset;
511 int lshift = 0;
512 if (width == 0) {
513 res.data.u32 = 0;
514 break;
515 }
516 if (width + offset < 32) {
517 rshift = 32 - width;
518 lshift = 32 - width - offset;
519 }
520 switch (i->dType) {
521 case TYPE_S32: res.data.s32 = (a->data.s32 << lshift) >> rshift; break;
522 case TYPE_U32: res.data.u32 = (a->data.u32 << lshift) >> rshift; break;
523 default:
524 return;
525 }
526 break;
527 }
528 default:
529 return;
530 }
531 ++foldCount;
532
533 i->src(0).mod = Modifier(0);
534 i->src(1).mod = Modifier(0);
535
536 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
537 i->setSrc(1, NULL);
538
539 i->getSrc(0)->reg.data = res.data;
540
541 if (i->op == OP_MAD || i->op == OP_FMA) {
542 i->op = OP_ADD;
543
544 i->setSrc(1, i->getSrc(0));
545 i->src(1).mod = i->src(2).mod;
546 i->setSrc(0, i->getSrc(2));
547 i->setSrc(2, NULL);
548
549 ImmediateValue src0;
550 if (i->src(0).getImmediate(src0))
551 expr(i, src0, *i->getSrc(1)->asImm());
552 } else {
553 i->op = OP_MOV;
554 }
555 }
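
// [editor's note] The EXTBF rule above restated as a self-contained host
// helper for reference; foldExtbfU32 is an illustrative name, not part of
// this file. src1 packs the field as (width << 8) | offset.
static inline uint32_t
foldExtbfU32(uint32_t a, uint32_t b)
{
   int offset = b & 0xff;
   int width = (b >> 8) & 0xff;
   if (width == 0)
      return 0;
   if (width + offset >= 32) // field reaches the MSB, a plain shift suffices
      return a >> offset;
   // move the field's MSB to bit 31, then shift back down; this variant
   // zero-extends, the TYPE_S32 case above sign-extends instead
   return (a << (32 - width - offset)) >> (32 - width);
}
// e.g. foldExtbfU32(0xabcd1234, (8 << 8) | 4) == 0x23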
556
557 void
558 ConstantFolding::expr(Instruction *i,
559 ImmediateValue &imm0,
560 ImmediateValue &imm1,
561 ImmediateValue &imm2)
562 {
563 struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
564 struct Storage res;
565
566 memset(&res.data, 0, sizeof(res.data));
567
568 switch (i->op) {
569 case OP_INSBF: {
570 int offset = b->data.u32 & 0xff;
571 int width = (b->data.u32 >> 8) & 0xff;
572 unsigned bitmask = ((1u << width) - 1) << offset; // assumes width < 32
573 res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
574 break;
575 }
576 default:
577 return;
578 }
579
580 ++foldCount;
581 i->src(0).mod = Modifier(0);
582 i->src(1).mod = Modifier(0);
583 i->src(2).mod = Modifier(0);
584
585 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
586 i->setSrc(1, NULL);
587 i->setSrc(2, NULL);
588
589 i->getSrc(0)->reg.data = res.data;
590
591 i->op = OP_MOV;
592 }
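
// [editor's note] Likewise for INSBF (foldInsbfU32 is an illustrative name):
// bits [offset, offset+width) of the result come from src0, the rest from
// src2, assuming width < 32 as in the fold above.
static inline uint32_t
foldInsbfU32(uint32_t a, uint32_t b, uint32_t c)
{
   int offset = b & 0xff;
   int width = (b >> 8) & 0xff;
   uint32_t mask = ((1u << width) - 1) << offset;
   return ((a << offset) & mask) | (c & ~mask);
}
// e.g. foldInsbfU32(0xff, (8 << 8) | 8, 0x12345678) == 0x1234ff78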
593
594 void
595 ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
596 {
597 Storage res;
598
599 if (i->dType != TYPE_F32)
600 return;
601 switch (i->op) {
602 case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
603 case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
604 case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
605 case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
606 case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
607 case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
608 case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
609 case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
610 case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
611 case OP_PRESIN:
612 case OP_PREEX2:
613 // these should be handled in subsequent OP_SIN/COS/EX2
614 res.data.f32 = imm.reg.data.f32;
615 break;
616 default:
617 return;
618 }
619 i->op = OP_MOV;
620 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
621 i->src(0).mod = Modifier(0);
622 }
623
624 void
625 ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
626 const int s, ImmediateValue& imm2)
627 {
628 const int t = s ? 0 : 1;
629 Instruction *insn;
630 Instruction *mul1 = NULL; // mul1 before mul2
631 int e = 0;
632 float f = imm2.reg.data.f32;
633 ImmediateValue imm1;
634
635 assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);
636
637 if (mul2->getSrc(t)->refCount() == 1) {
638 insn = mul2->getSrc(t)->getInsn();
639 if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
640 mul1 = insn;
641 if (mul1 && !mul1->saturate) {
642 int s1;
643
644 if (mul1->src(s1 = 0).getImmediate(imm1) ||
645 mul1->src(s1 = 1).getImmediate(imm1)) {
646 bld.setPosition(mul1, false);
647 // a = mul r, imm1
648 // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
649 mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
650 mul1->src(s1).mod = Modifier(0);
651 mul2->def(0).replace(mul1->getDef(0), false);
652 } else
653 if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
654 // c = mul a, b
655 // d = mul c, imm -> d = mul_x_imm a, b
656 mul1->postFactor = e;
657 mul2->def(0).replace(mul1->getDef(0), false);
658 if (f < 0)
659 mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
660 }
661 mul1->saturate = mul2->saturate;
662 return;
663 }
664 }
665 if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
666 // b = mul a, imm
667 // d = mul b, c -> d = mul_x_imm a, c
668 int s2, t2;
669 insn = mul2->getDef(0)->uses.front()->getInsn();
670 if (!insn)
671 return;
672 mul1 = mul2;
673 mul2 = NULL;
674 s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
675 t2 = s2 ? 0 : 1;
676 if (insn->op == OP_MUL && insn->dType == TYPE_F32)
677 if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
678 mul2 = insn;
679 if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
680 mul2->postFactor = e;
681 mul2->setSrc(s2, mul1->src(t));
682 if (f < 0)
683 mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
684 }
685 }
686 }
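
// [editor's note] The shapes handled above, in IR-like pseudocode:
//   a = mul $r0 imm1 ; d = mul a imm2   -->   a = mul $r0 (imm1*imm2)
//   b = mul $r0 imm  ; d = mul b $r1    -->   d = mul_x_imm $r0 $r1
// where mul_x_imm scales the product by 2^postFactor, so imm must be a
// small power of two the target accepts (isPostMultiplySupported).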
687
688 void
689 ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
690 {
691 const int t = !s;
692 const operation op = i->op;
693
694 switch (i->op) {
695 case OP_MUL:
696 if (i->dType == TYPE_F32)
697 tryCollapseChainedMULs(i, s, imm0);
698
699 if (imm0.isInteger(0)) {
700 i->op = OP_MOV;
701 i->setSrc(0, new_ImmediateValue(prog, 0u));
702 i->src(0).mod = Modifier(0);
703 i->setSrc(1, NULL);
704 } else
705 if (imm0.isInteger(1) || imm0.isInteger(-1)) {
706 if (imm0.isNegative())
707 i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
708 i->op = i->src(t).mod.getOp();
709 if (s == 0) {
710 i->setSrc(0, i->getSrc(1));
711 i->src(0).mod = i->src(1).mod;
712 i->src(1).mod = 0;
713 }
714 if (i->op != OP_CVT)
715 i->src(0).mod = 0;
716 i->setSrc(1, NULL);
717 } else
718 if (imm0.isInteger(2) || imm0.isInteger(-2)) {
719 if (imm0.isNegative())
720 i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
721 i->op = OP_ADD;
722 i->setSrc(s, i->getSrc(t));
723 i->src(s).mod = i->src(t).mod;
724 } else
725 if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
726 i->op = OP_SHL;
727 imm0.applyLog2();
728 i->setSrc(0, i->getSrc(t));
729 i->src(0).mod = i->src(t).mod;
730 i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
731 i->src(1).mod = 0;
732 }
733 break;
734 case OP_ADD:
735 if (i->usesFlags())
736 break;
737 if (imm0.isInteger(0)) {
738 if (s == 0) {
739 i->setSrc(0, i->getSrc(1));
740 i->src(0).mod = i->src(1).mod;
741 }
742 i->setSrc(1, NULL);
743 i->op = i->src(0).mod.getOp();
744 if (i->op != OP_CVT)
745 i->src(0).mod = Modifier(0);
746 }
747 break;
748
749 case OP_DIV:
750 if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
751 break;
752 bld.setPosition(i, false);
753 if (imm0.reg.data.u32 == 0) {
754 break;
755 } else
756 if (imm0.reg.data.u32 == 1) {
757 i->op = OP_MOV;
758 i->setSrc(1, NULL);
759 } else
760 if (i->dType == TYPE_U32 && imm0.isPow2()) {
761 i->op = OP_SHR;
762 i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
763 } else
764 if (i->dType == TYPE_U32) {
765 Instruction *mul;
766 Value *tA, *tB;
767 const uint32_t d = imm0.reg.data.u32;
768 uint32_t m;
769 int r, s;
770 uint32_t l = util_logbase2(d);
771 if (((uint32_t)1 << l) < d)
772 ++l;
773 m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
774 r = l ? 1 : 0;
775 s = l ? (l - 1) : 0;
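// [editor's note] Worked example: for d == 7, l = 3, m = 0x24924925,
// r = 1, s = 2; the code below then emits
//   t = mulhi(x, m)
//   q = (t + ((x - t) >> 1)) >> 2   == x / 7
// i.e. the round-up variant of division by invariant multiplication
// (Granlund/Montgomery).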
776
777 tA = bld.getSSA();
778 tB = bld.getSSA();
779 mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
780 bld.loadImm(NULL, m));
781 mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
782 bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
783 tA = bld.getSSA();
784 if (r)
785 bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
786 else
787 tA = tB;
788 tB = s ? bld.getSSA() : i->getDef(0);
789 bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
790 if (s)
791 bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));
792
793 delete_Instruction(prog, i);
794 } else
795 if (imm0.reg.data.s32 == -1) {
796 i->op = OP_NEG;
797 i->setSrc(1, NULL);
798 } else {
799 LValue *tA, *tB;
800 LValue *tD;
801 const int32_t d = imm0.reg.data.s32;
802 int32_t m;
803 int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
804 if ((1 << l) < abs(d))
805 ++l;
806 if (!l)
807 l = 1;
808 m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);
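// [editor's note] e.g. for d == 3: l = 2 and m = 0xaaaaaaab (-1431655765).
// The MAD below computes t = mulhi(x, m) + x, the SHR gives t >> (l-1), and
// the SET/SUB pair adds 1 when x < 0 (integer SET yields -1 for true), so
// q = (t >> (l-1)) - (x < 0 ? -1 : 0), negated afterwards if d < 0.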
809
810 tA = bld.getSSA();
811 tB = bld.getSSA();
812 bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
813 i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
814 if (l > 1)
815 bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
816 else
817 tB = tA;
818 tA = bld.getSSA();
819 bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
820 tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
821 bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
822 if (d < 0)
823 bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);
824
825 delete_Instruction(prog, i);
826 }
827 break;
828
829 case OP_MOD:
830 if (i->sType == TYPE_U32 && imm0.isPow2()) {
831 bld.setPosition(i, false);
832 i->op = OP_AND;
833 i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
834 }
835 break;
836
837 case OP_SET: // TODO: SET_AND,OR,XOR
838 {
839 CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
840 CondCode cc, ccZ;
841 if (i->src(t).mod != Modifier(0))
842 return;
843 if (imm0.reg.data.u32 != 0 || !si || si->op != OP_SET)
844 return;
845 cc = si->setCond;
846 ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
847 if (s == 0)
848 ccZ = reverseCondCode(ccZ);
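// [editor's note] @si's result is a boolean (0 or 1, resp. 0/-1 for the
// integer case), so the comparison against zero reduces as follows:
//   bool <  0 : never true          -> CC_FL
//   bool >= 0 : always true         -> CC_TR
//   bool == 0, bool <= 0 : !bool    -> inverse of @si's condition
//   bool >  0, bool != 0 :  bool    -> keep @si's condition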
849 switch (ccZ) {
850 case CC_LT: cc = CC_FL; break;
851 case CC_GE: cc = CC_TR; break;
852 case CC_EQ: cc = inverseCondCode(cc); break;
853 case CC_LE: cc = inverseCondCode(cc); break;
854 case CC_GT: break;
855 case CC_NE: break;
856 default:
857 return;
858 }
859 i->asCmp()->setCond = cc;
860 i->setSrc(0, si->src(0));
861 i->setSrc(1, si->src(1));
862 i->sType = si->sType;
863 }
864 break;
865
866 case OP_SHL:
867 {
868 if (s != 1 || i->src(0).mod != Modifier(0))
869 break;
870 // try to concatenate shifts
871 Instruction *si = i->getSrc(0)->getInsn();
872 if (!si || si->op != OP_SHL)
873 break;
874 ImmediateValue imm1;
875 if (si->src(1).getImmediate(imm1)) {
876 bld.setPosition(i, false);
877 i->setSrc(0, si->getSrc(0));
878 i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
879 }
880 }
881 break;
882
883 case OP_ABS:
884 case OP_NEG:
885 case OP_LG2:
886 case OP_RCP:
887 case OP_SQRT:
888 case OP_RSQ:
889 case OP_PRESIN:
890 case OP_SIN:
891 case OP_COS:
892 case OP_PREEX2:
893 case OP_EX2:
894 unary(i, imm0);
895 break;
896 default:
897 return;
898 }
899 if (i->op != op)
900 foldCount++;
901 }
902
903 // =============================================================================
904
905 // Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
906 class ModifierFolding : public Pass
907 {
908 private:
909 virtual bool visit(BasicBlock *);
910 };
911
912 bool
913 ModifierFolding::visit(BasicBlock *bb)
914 {
915 const Target *target = prog->getTarget();
916
917 Instruction *i, *next, *mi;
918 Modifier mod;
919
920 for (i = bb->getEntry(); i; i = next) {
921 next = i->next;
922
923 if (0 && i->op == OP_SUB) {
924 // turn "sub" into "add neg" (do we really want this ?)
925 i->op = OP_ADD;
926 i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
927 }
928
929 for (int s = 0; s < 3 && i->srcExists(s); ++s) {
930 mi = i->getSrc(s)->getInsn();
931 if (!mi ||
932 mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
933 continue;
934 if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
935 if ((i->op != OP_ADD &&
936 i->op != OP_MUL) ||
937 (mi->op != OP_ABS &&
938 mi->op != OP_NEG))
939 continue;
940 } else
941 if (i->sType != mi->dType) {
942 continue;
943 }
944 if ((mod = Modifier(mi->op)) == Modifier(0))
945 continue;
946 mod *= mi->src(0).mod;
947
948 if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
949 // abs neg [abs] = abs
950 mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
951 } else
952 if ((i->op == OP_NEG) && mod.neg()) {
953 assert(s == 0);
954 // neg as both opcode and modifier on same insn is prohibited
955 // neg neg abs = abs, neg neg = identity
956 mod = mod & Modifier(~NV50_IR_MOD_NEG);
957 i->op = mod.getOp();
958 mod = mod & Modifier(~NV50_IR_MOD_ABS);
959 if (mod == Modifier(0))
960 i->op = OP_MOV;
961 }
962
963 if (target->isModSupported(i, s, mod)) {
964 i->setSrc(s, mi->getSrc(0));
965 i->src(s).mod *= mod;
966 }
967 }
968
969 if (i->op == OP_SAT) {
970 mi = i->getSrc(0)->getInsn();
971 if (mi &&
972 mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
973 mi->saturate = 1;
974 mi->setDef(0, i->getDef(0));
975 delete_Instruction(prog, i);
976 }
977 }
978 }
979
980 return true;
981 }
982
983 // =============================================================================
984
985 // MUL + ADD -> MAD/FMA
986 // MIN/MAX(a, a) -> a, etc.
987 // SLCT(a, b, const) -> cc(const) ? a : b
988 // RCP(RCP(a)) -> a
989 // MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
990 class AlgebraicOpt : public Pass
991 {
992 private:
993 virtual bool visit(BasicBlock *);
994
995 void handleABS(Instruction *);
996 bool handleADD(Instruction *);
997 bool tryADDToMADOrSAD(Instruction *, operation toOp);
998 void handleMINMAX(Instruction *);
999 void handleRCP(Instruction *);
1000 void handleSLCT(Instruction *);
1001 void handleLOGOP(Instruction *);
1002 void handleCVT(Instruction *);
1003 void handleSUCLAMP(Instruction *);
1004
1005 BuildUtil bld;
1006 };
1007
1008 void
1009 AlgebraicOpt::handleABS(Instruction *abs)
1010 {
1011 Instruction *sub = abs->getSrc(0)->getInsn();
1012 DataType ty;
1013 if (!sub ||
1014 !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
1015 return;
1016 // expect not to have mods yet, if we do, bail
1017 if (sub->src(0).mod || sub->src(1).mod)
1018 return;
1019 // hidden conversion ?
1020 ty = intTypeToSigned(sub->dType);
1021 if (abs->dType != abs->sType || ty != abs->sType)
1022 return;
1023
1024 if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
1025 sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
1026 sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
1027 return;
1028
1029 Value *src0 = sub->getSrc(0);
1030 Value *src1 = sub->getSrc(1);
1031
1032 if (sub->op == OP_ADD) {
1033 Instruction *neg = sub->getSrc(1)->getInsn();
1034 if (neg && neg->op != OP_NEG) {
1035 neg = sub->getSrc(0)->getInsn();
1036 src0 = sub->getSrc(1);
1037 }
1038 if (!neg || neg->op != OP_NEG ||
1039 neg->dType != neg->sType || neg->sType != ty)
1040 return;
1041 src1 = neg->getSrc(0);
1042 }
1043
1044 // found ABS(SUB)
1045 abs->moveSources(1, 2); // move sources >=1 up by 2
1046 abs->op = OP_SAD;
1047 abs->setType(sub->dType);
1048 abs->setSrc(0, src0);
1049 abs->setSrc(1, src1);
1050 bld.setPosition(abs, false);
1051 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1052 }
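
// [editor's note] Resulting rewrite, with hypothetical registers:
//   $r2 = sub $r0 $r1 ; $r3 = abs $r2   -->   $r3 = sad $r0 $r1 0x0
// SAD computes abs(a - b) + c, so a zero third source reproduces ABS(SUB).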
1053
1054 bool
1055 AlgebraicOpt::handleADD(Instruction *add)
1056 {
1057 Value *src0 = add->getSrc(0);
1058 Value *src1 = add->getSrc(1);
1059
1060 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1061 return false;
1062
1063 bool changed = false;
1064 if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1065 changed = tryADDToMADOrSAD(add, OP_MAD);
1066 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1067 changed = tryADDToMADOrSAD(add, OP_SAD);
1068 return changed;
1069 }
1070
1071 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1072 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1073 bool
1074 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1075 {
1076 Value *src0 = add->getSrc(0);
1077 Value *src1 = add->getSrc(1);
1078 Value *src;
1079 int s;
1080 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1081 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1082 Modifier mod[4];
1083
1084 if (src0->refCount() == 1 &&
1085 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1086 s = 0;
1087 else
1088 if (src1->refCount() == 1 &&
1089 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1090 s = 1;
1091 else
1092 return false;
1093
1094 if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
1095 (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
1096 return false;
1097
1098 src = add->getSrc(s);
1099
1100 if (src->getInsn()->postFactor)
1101 return false;
1102 if (toOp == OP_SAD) {
1103 ImmediateValue imm;
1104 if (!src->getInsn()->src(2).getImmediate(imm))
1105 return false;
1106 if (!imm.isInteger(0))
1107 return false;
1108 }
1109
1110 mod[0] = add->src(0).mod;
1111 mod[1] = add->src(1).mod;
1112 mod[2] = src->getUniqueInsn()->src(0).mod;
1113 mod[3] = src->getUniqueInsn()->src(1).mod;
1114
1115 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1116 return false;
1117
1118 add->op = toOp;
1119 add->subOp = src->getInsn()->subOp; // potentially mul-high
1120
1121 add->setSrc(2, add->src(s ? 0 : 1));
1122
1123 add->setSrc(0, src->getInsn()->getSrc(0));
1124 add->src(0).mod = mod[2] ^ mod[s];
1125 add->setSrc(1, src->getInsn()->getSrc(1));
1126 add->src(1).mod = mod[3];
1127
1128 return true;
1129 }
1130
1131 void
1132 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1133 {
1134 Value *src0 = minmax->getSrc(0);
1135 Value *src1 = minmax->getSrc(1);
1136
1137 if (src0 != src1 || src0->reg.file != FILE_GPR)
1138 return;
1139 if (minmax->src(0).mod == minmax->src(1).mod) {
1140 if (minmax->def(0).mayReplace(minmax->src(0))) {
1141 minmax->def(0).replace(minmax->src(0), false);
1142 minmax->bb->remove(minmax);
1143 } else {
1144 minmax->op = OP_CVT;
1145 minmax->setSrc(1, NULL);
1146 }
1147 } else {
1148 // TODO:
1149 // min(x, -x) = -abs(x)
1150 // min(x, -abs(x)) = -abs(x)
1151 // min(x, abs(x)) = x
1152 // max(x, -abs(x)) = x
1153 // max(x, abs(x)) = abs(x)
1154 // max(x, -x) = abs(x)
1155 }
1156 }
1157
1158 void
1159 AlgebraicOpt::handleRCP(Instruction *rcp)
1160 {
1161 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1162
1163 if (si && si->op == OP_RCP) {
1164 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1165 rcp->op = mod.getOp();
1166 rcp->setSrc(0, si->getSrc(0));
1167 }
1168 }
1169
1170 void
1171 AlgebraicOpt::handleSLCT(Instruction *slct)
1172 {
1173 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1174 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1175 slct->setSrc(0, slct->getSrc(1));
1176 } else
1177 if (slct->getSrc(0) != slct->getSrc(1)) {
1178 return;
1179 }
1180 slct->op = OP_MOV;
1181 slct->setSrc(1, NULL);
1182 slct->setSrc(2, NULL);
1183 }
1184
1185 void
1186 AlgebraicOpt::handleLOGOP(Instruction *logop)
1187 {
1188 Value *src0 = logop->getSrc(0);
1189 Value *src1 = logop->getSrc(1);
1190
1191 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1192 return;
1193
1194 if (src0 == src1) {
1195 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1196 logop->def(0).mayReplace(logop->src(0))) {
1197 logop->def(0).replace(logop->src(0), false);
1198 delete_Instruction(prog, logop);
1199 }
1200 } else {
1201 // try AND(SET, SET) -> SET_AND(SET)
1202 Instruction *set0 = src0->getInsn();
1203 Instruction *set1 = src1->getInsn();
1204
1205 if (!set0 || set0->fixed || !set1 || set1->fixed)
1206 return;
1207 if (set1->op != OP_SET) {
1208 Instruction *xchg = set0;
1209 set0 = set1;
1210 set1 = xchg;
1211 if (set1->op != OP_SET)
1212 return;
1213 }
1214 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
1215 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
1216 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
1217 return;
1218 if (set0->op != OP_SET &&
1219 set0->op != OP_SET_AND &&
1220 set0->op != OP_SET_OR &&
1221 set0->op != OP_SET_XOR)
1222 return;
1223 if (set0->getDef(0)->refCount() > 1 &&
1224 set1->getDef(0)->refCount() > 1)
1225 return;
1226 if (set0->getPredicate() || set1->getPredicate())
1227 return;
1228 // check that they don't source each other
1229 for (int s = 0; s < 2; ++s)
1230 if (set0->getSrc(s) == set1->getDef(0) ||
1231 set1->getSrc(s) == set0->getDef(0))
1232 return;
1233
1234 set0 = cloneForward(func, set0);
1235 set1 = cloneShallow(func, set1);
1236 logop->bb->insertAfter(logop, set1);
1237 logop->bb->insertAfter(logop, set0);
1238
1239 set0->dType = TYPE_U8;
1240 set0->getDef(0)->reg.file = FILE_PREDICATE;
1241 set0->getDef(0)->reg.size = 1;
1242 set1->setSrc(2, set0->getDef(0));
1243 set1->op = redOp;
1244 set1->setDef(0, logop->getDef(0));
1245 delete_Instruction(prog, logop);
1246 }
1247 }
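
// [editor's note] Sketch of the AND(SET, SET) rewrite above (registers
// hypothetical):
//   $r2 = set lt f32 $r0 $r1
//   $r3 = set gt f32 $r4 $r5
//   $r6 = and u32 $r2 $r3
// becomes
//   $p0 = set lt f32 $r0 $r1          // now writes a predicate
//   $r6 = set_and gt f32 $r4 $r5 $p0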
1248
1249 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1250 // nv50:
1251 // F2I(NEG(I2F(ABS(SET))))
1252 void
1253 AlgebraicOpt::handleCVT(Instruction *cvt)
1254 {
1255 if (cvt->sType != TYPE_F32 ||
1256 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
1257 return;
1258 Instruction *insn = cvt->getSrc(0)->getInsn();
1259 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1260 return;
1261 if (insn->src(0).mod != Modifier(0))
1262 return;
1263 insn = insn->getSrc(0)->getInsn();
1264
1265 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
1266 if (insn && insn->op == OP_CVT &&
1267 insn->dType == TYPE_F32 &&
1268 insn->sType == TYPE_S32) {
1269 insn = insn->getSrc(0)->getInsn();
1270 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
1271 insn->src(0).mod)
1272 return;
1273 insn = insn->getSrc(0)->getInsn();
1274 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
1275 return;
1276 } else
1277 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
1278 return;
1279 }
1280
1281 Instruction *bset = cloneShallow(func, insn);
1282 bset->dType = TYPE_U32;
1283 bset->setDef(0, cvt->getDef(0));
1284 cvt->bb->insertAfter(cvt, bset);
1285 delete_Instruction(prog, cvt);
1286 }
1287
1288 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
1289 void
1290 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
1291 {
1292 ImmediateValue imm;
1293 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
1294 int s;
1295 Instruction *add;
1296
1297 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
1298
1299 // look for ADD (TODO: only count references by non-SUCLAMP)
1300 if (insn->getSrc(0)->refCount() > 1)
1301 return;
1302 add = insn->getSrc(0)->getInsn();
1303 if (!add || add->op != OP_ADD ||
1304 (add->dType != TYPE_U32 &&
1305 add->dType != TYPE_S32))
1306 return;
1307
1308 // look for immediate
1309 for (s = 0; s < 2; ++s)
1310 if (add->src(s).getImmediate(imm))
1311 break;
1312 if (s >= 2)
1313 return;
1314 s = s ? 0 : 1;
1315 // determine if immediate fits
1316 val += imm.reg.data.s32;
1317 if (val > 31 || val < -32)
1318 return;
1319 // determine if other addend fits
1320 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
1321 return;
1322
1323 bld.setPosition(insn, false); // make sure bld is init'ed
1324 // replace sources
1325 insn->setSrc(2, bld.mkImm(val));
1326 insn->setSrc(0, add->getSrc(s));
1327 }
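
// [editor's note] e.g., assuming the immediate fits the signed 6-bit field:
//   $r1 = add $r0 0x4 ; suclamp $r2 $r1 $r3 0x0
// becomes
//   suclamp $r2 $r0 $r3 0x4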
1328
1329 bool
1330 AlgebraicOpt::visit(BasicBlock *bb)
1331 {
1332 Instruction *next;
1333 for (Instruction *i = bb->getEntry(); i; i = next) {
1334 next = i->next;
1335 switch (i->op) {
1336 case OP_ABS:
1337 handleABS(i);
1338 break;
1339 case OP_ADD:
1340 handleADD(i);
1341 break;
1342 case OP_RCP:
1343 handleRCP(i);
1344 break;
1345 case OP_MIN:
1346 case OP_MAX:
1347 handleMINMAX(i);
1348 break;
1349 case OP_SLCT:
1350 handleSLCT(i);
1351 break;
1352 case OP_AND:
1353 case OP_OR:
1354 case OP_XOR:
1355 handleLOGOP(i);
1356 break;
1357 case OP_CVT:
1358 handleCVT(i);
1359 break;
1360 case OP_SUCLAMP:
1361 handleSUCLAMP(i);
1362 break;
1363 default:
1364 break;
1365 }
1366 }
1367
1368 return true;
1369 }
1370
1371 // =============================================================================
1372
1373 static inline void
1374 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
1375 {
1376 if (offset != ldst->getSrc(0)->reg.data.offset) {
1377 if (ldst->getSrc(0)->refCount() > 1)
1378 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
1379 ldst->getSrc(0)->reg.data.offset = offset;
1380 }
1381 }
1382
1383 // Combine loads and stores, forward stores to loads where possible.
1384 class MemoryOpt : public Pass
1385 {
1386 private:
1387 class Record
1388 {
1389 public:
1390 Record *next;
1391 Instruction *insn;
1392 const Value *rel[2];
1393 const Value *base;
1394 int32_t offset;
1395 int8_t fileIndex;
1396 uint8_t size;
1397 bool locked;
1398 Record *prev;
1399
1400 bool overlaps(const Instruction *ldst) const;
1401
1402 inline void link(Record **);
1403 inline void unlink(Record **);
1404 inline void set(const Instruction *ldst);
1405 };
1406
1407 public:
1408 MemoryOpt();
1409
1410 Record *loads[DATA_FILE_COUNT];
1411 Record *stores[DATA_FILE_COUNT];
1412
1413 MemoryPool recordPool;
1414
1415 private:
1416 virtual bool visit(BasicBlock *);
1417 bool runOpt(BasicBlock *);
1418
1419 Record **getList(const Instruction *);
1420
1421 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
1422
1423 // merge @insn into load/store instruction from @rec
1424 bool combineLd(Record *rec, Instruction *ld);
1425 bool combineSt(Record *rec, Instruction *st);
1426
1427 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
1428 bool replaceLdFromSt(Instruction *ld, Record *stRec);
1429 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
1430
1431 void addRecord(Instruction *ldst);
1432 void purgeRecords(Instruction *const st, DataFile);
1433 void lockStores(Instruction *const ld);
1434 void reset();
1435
1436 private:
1437 Record *prevRecord;
1438 };
1439
1440 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
1441 {
1442 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
1443 loads[i] = NULL;
1444 stores[i] = NULL;
1445 }
1446 prevRecord = NULL;
1447 }
1448
1449 void
1450 MemoryOpt::reset()
1451 {
1452 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
1453 Record *it, *next;
1454 for (it = loads[i]; it; it = next) {
1455 next = it->next;
1456 recordPool.release(it);
1457 }
1458 loads[i] = NULL;
1459 for (it = stores[i]; it; it = next) {
1460 next = it->next;
1461 recordPool.release(it);
1462 }
1463 stores[i] = NULL;
1464 }
1465 }
1466
1467 bool
1468 MemoryOpt::combineLd(Record *rec, Instruction *ld)
1469 {
1470 int32_t offRc = rec->offset;
1471 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1472 int sizeRc = rec->size;
1473 int sizeLd = typeSizeof(ld->dType);
1474 int size = sizeRc + sizeLd;
1475 int d, j;
1476
1477 if (!prog->getTarget()->
1478 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
1479 return false;
1480 // no unaligned loads
1481 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
1482 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
1483 return false;
1484
1485 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
1486
1487 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
1488
1489 if (offLd < offRc) {
1490 int sz;
1491 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
1492 // d: nr of definitions in ld
1493 // j: nr of definitions in rec->insn, move:
1494 for (d = d + j - 1; j > 0; --j, --d)
1495 rec->insn->setDef(d, rec->insn->getDef(j - 1));
1496
1497 if (rec->insn->getSrc(0)->refCount() > 1)
1498 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
1499 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
1500
1501 d = 0;
1502 } else {
1503 d = j;
1504 }
1505 // move definitions of @ld to @rec->insn
1506 for (j = 0; sizeLd; ++j, ++d) {
1507 sizeLd -= ld->getDef(j)->reg.size;
1508 rec->insn->setDef(d, ld->getDef(j));
1509 }
1510
1511 rec->size = size;
1512 rec->insn->getSrc(0)->reg.size = size;
1513 rec->insn->setType(typeOfSize(size));
1514
1515 delete_Instruction(prog, ld);
1516
1517 return true;
1518 }
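
// [editor's note] e.g. two adjacent 32-bit loads
//   $r0 = ld u32 c0[0x10] ; $r1 = ld u32 c0[0x14]
// are merged into one 64-bit load defining both values:
//   $r0, $r1 = ld u64 c0[0x10]
// subject to isAccessSupported() and the alignment checks above.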
1519
1520 bool
1521 MemoryOpt::combineSt(Record *rec, Instruction *st)
1522 {
1523 int32_t offRc = rec->offset;
1524 int32_t offSt = st->getSrc(0)->reg.data.offset;
1525 int sizeRc = rec->size;
1526 int sizeSt = typeSizeof(st->dType);
1527 int s = sizeSt / 4;
1528 int size = sizeRc + sizeSt;
1529 int j, k;
1530 Value *src[4]; // no modifiers in ValueRef allowed for st
1531 Value *extra[3];
1532
1533 if (!prog->getTarget()->
1534 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
1535 return false;
1536 if (size == 8 && MIN2(offRc, offSt) & 0x7)
1537 return false;
1538
1539 st->takeExtraSources(0, extra); // save predicate and indirect address
1540
1541 if (offRc < offSt) {
1542 // save values from @st
1543 for (s = 0; sizeSt; ++s) {
1544 sizeSt -= st->getSrc(s + 1)->reg.size;
1545 src[s] = st->getSrc(s + 1);
1546 }
1547 // set record's values as low sources of @st
1548 for (j = 1; sizeRc; ++j) {
1549 sizeRc -= rec->insn->getSrc(j)->reg.size;
1550 st->setSrc(j, rec->insn->getSrc(j));
1551 }
1552 // set saved values as high sources of @st
1553 for (k = j, j = 0; j < s; ++j)
1554 st->setSrc(k++, src[j]);
1555
1556 updateLdStOffset(st, offRc, func);
1557 } else {
1558 for (j = 1; sizeSt; ++j)
1559 sizeSt -= st->getSrc(j)->reg.size;
1560 for (s = 1; sizeRc; ++j, ++s) {
1561 sizeRc -= rec->insn->getSrc(s)->reg.size;
1562 st->setSrc(j, rec->insn->getSrc(s));
1563 }
1564 rec->offset = offSt;
1565 }
1566 st->putExtraSources(0, extra); // restore pointer and predicate
1567
1568 delete_Instruction(prog, rec->insn);
1569 rec->insn = st;
1570 rec->size = size;
1571 rec->insn->getSrc(0)->reg.size = size;
1572 rec->insn->setType(typeOfSize(size));
1573 return true;
1574 }
1575
1576 void
1577 MemoryOpt::Record::set(const Instruction *ldst)
1578 {
1579 const Symbol *mem = ldst->getSrc(0)->asSym();
1580 fileIndex = mem->reg.fileIndex;
1581 rel[0] = ldst->getIndirect(0, 0);
1582 rel[1] = ldst->getIndirect(0, 1);
1583 offset = mem->reg.data.offset;
1584 base = mem->getBase();
1585 size = typeSizeof(ldst->sType);
1586 }
1587
1588 void
1589 MemoryOpt::Record::link(Record **list)
1590 {
1591 next = *list;
1592 if (next)
1593 next->prev = this;
1594 prev = NULL;
1595 *list = this;
1596 }
1597
1598 void
1599 MemoryOpt::Record::unlink(Record **list)
1600 {
1601 if (next)
1602 next->prev = prev;
1603 if (prev)
1604 prev->next = next;
1605 else
1606 *list = next;
1607 }
1608
1609 MemoryOpt::Record **
1610 MemoryOpt::getList(const Instruction *insn)
1611 {
1612 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
1613 return &loads[insn->src(0).getFile()];
1614 return &stores[insn->src(0).getFile()];
1615 }
1616
1617 void
1618 MemoryOpt::addRecord(Instruction *i)
1619 {
1620 Record **list = getList(i);
1621 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
1622
1623 it->link(list);
1624 it->set(i);
1625 it->insn = i;
1626 it->locked = false;
1627 }
1628
1629 MemoryOpt::Record *
1630 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
1631 {
1632 const Symbol *sym = insn->getSrc(0)->asSym();
1633 const int size = typeSizeof(insn->sType);
1634 Record *rec = NULL;
1635 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
1636
1637 for (; it; it = it->next) {
1638 if (it->locked && insn->op != OP_LOAD)
1639 continue;
1640 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
1641 it->rel[0] != insn->getIndirect(0, 0) ||
1642 it->fileIndex != sym->reg.fileIndex ||
1643 it->rel[1] != insn->getIndirect(0, 1))
1644 continue;
1645
1646 if (it->offset < sym->reg.data.offset) {
1647 if (it->offset + it->size >= sym->reg.data.offset) {
1648 isAdj = (it->offset + it->size == sym->reg.data.offset);
1649 if (!isAdj)
1650 return it;
1651 if (!(it->offset & 0x7))
1652 rec = it;
1653 }
1654 } else {
1655 isAdj = it->offset != sym->reg.data.offset;
1656 if (size <= it->size && !isAdj)
1657 return it;
1658 else
1659 if (!(sym->reg.data.offset & 0x7))
1660 if (it->offset - size <= sym->reg.data.offset)
1661 rec = it;
1662 }
1663 }
1664 return rec;
1665 }
1666
1667 bool
1668 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
1669 {
1670 Instruction *st = rec->insn;
1671 int32_t offSt = rec->offset;
1672 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1673 int d, s;
1674
1675 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
1676 offSt += st->getSrc(s)->reg.size;
1677 if (offSt != offLd)
1678 return false;
1679
1680 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
1681 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
1682 return false;
1683 if (st->getSrc(s)->reg.file != FILE_GPR)
1684 return false;
1685 ld->def(d).replace(st->src(s), false);
1686 }
1687 ld->bb->remove(ld);
1688 return true;
1689 }
1690
1691 bool
1692 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
1693 {
1694 Instruction *ldR = rec->insn;
1695 int32_t offR = rec->offset;
1696 int32_t offE = ldE->getSrc(0)->reg.data.offset;
1697 int dR, dE;
1698
1699 assert(offR <= offE);
1700 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
1701 offR += ldR->getDef(dR)->reg.size;
1702 if (offR != offE)
1703 return false;
1704
1705 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
1706 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
1707 return false;
1708 ldE->def(dE).replace(ldR->getDef(dR), false);
1709 }
1710
1711 delete_Instruction(prog, ldE);
1712 return true;
1713 }
1714
1715 bool
1716 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
1717 {
1718 const Instruction *const ri = rec->insn;
1719 Value *extra[3];
1720
1721 int32_t offS = st->getSrc(0)->reg.data.offset;
1722 int32_t offR = rec->offset;
1723 int32_t endS = offS + typeSizeof(st->dType);
1724 int32_t endR = offR + typeSizeof(ri->dType);
1725
1726 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
1727
1728 st->takeExtraSources(0, extra);
1729
1730 if (offR < offS) {
1731 Value *vals[10];
1732 int s, n;
1733 int k = 0;
1734 // get non-replaced sources of ri
1735 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
1736 vals[k++] = ri->getSrc(s);
1737 n = s;
1738 // get replaced sources of st
1739 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
1740 vals[k++] = st->getSrc(s);
1741 // skip replaced sources of ri
1742 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
1743 // get non-replaced sources after values covered by st
1744 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
1745 vals[k++] = ri->getSrc(s);
1746 assert((unsigned int)k <= Elements(vals));
1747 for (s = 0; s < k; ++s)
1748 st->setSrc(s + 1, vals[s]);
1749 st->setSrc(0, ri->getSrc(0));
1750 } else
1751 if (endR > endS) {
1752 int j, s;
1753 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
1754 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
1755 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
1756 st->setSrc(s++, ri->getSrc(j));
1757 }
1758 st->putExtraSources(0, extra);
1759
1760 delete_Instruction(prog, rec->insn);
1761
1762 rec->insn = st;
1763 rec->offset = st->getSrc(0)->reg.data.offset;
1764
1765 st->setType(typeOfSize(rec->size));
1766
1767 return true;
1768 }
1769
1770 bool
1771 MemoryOpt::Record::overlaps(const Instruction *ldst) const
1772 {
1773 Record that;
1774 that.set(ldst);
1775
1776 if (this->fileIndex != that.fileIndex)
1777 return false;
1778
1779 if (this->rel[0] || that.rel[0])
1780 return this->base == that.base;
1781 return
1782 (this->offset < that.offset + that.size) &&
1783 (this->offset + this->size > that.offset);
1784 }
1785
1786 // Stores whose value the result of @ld depends on must not be eliminated
1787 // when we find later stores to the same location, nor may they be merged
1788 // with those later stores.
1789 // The stored value can, however, still be used to determine the value
1790 // returned by future loads.
1791 void
1792 MemoryOpt::lockStores(Instruction *const ld)
1793 {
1794 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
1795 if (!r->locked && r->overlaps(ld))
1796 r->locked = true;
1797 }
1798
1799 // Prior loads from the location of @st are no longer valid.
1800 // Stores to the location of @st may no longer be used to derive
1801 // the value at it nor be coalesced into later stores.
1802 void
1803 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
1804 {
1805 if (st)
1806 f = st->src(0).getFile();
1807
1808 for (Record *r = loads[f]; r; r = r->next)
1809 if (!st || r->overlaps(st))
1810 r->unlink(&loads[f]);
1811
1812 for (Record *r = stores[f]; r; r = r->next)
1813 if (!st || r->overlaps(st))
1814 r->unlink(&stores[f]);
1815 }
1816
1817 bool
1818 MemoryOpt::visit(BasicBlock *bb)
1819 {
1820 bool ret = runOpt(bb);
1821 // Run again, as a single pass cannot combine four 32-bit ld/st into one
1822 // 128-bit ld/st where 96-bit memory operations are forbidden.
1823 if (ret)
1824 ret = runOpt(bb);
1825 return ret;
1826 }
1827
1828 bool
1829 MemoryOpt::runOpt(BasicBlock *bb)
1830 {
1831 Instruction *ldst, *next;
1832 Record *rec;
1833 bool isAdjacent = true;
1834
1835 for (ldst = bb->getEntry(); ldst; ldst = next) {
1836 bool keep = true;
1837 bool isLoad = true;
1838 next = ldst->next;
1839
1840 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
1841 if (ldst->isDead()) {
1842 // might have been produced by earlier optimization
1843 delete_Instruction(prog, ldst);
1844 continue;
1845 }
1846 } else
1847 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
1848 isLoad = false;
1849 } else {
1850 // TODO: maybe have all fixed ops act as barrier ?
1851 if (ldst->op == OP_CALL ||
1852 ldst->op == OP_BAR ||
1853 ldst->op == OP_MEMBAR) {
1854 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1855 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1856 purgeRecords(NULL, FILE_MEMORY_SHARED);
1857 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1858 } else
1859 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
1860 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
1861 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1862 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1863 purgeRecords(NULL, FILE_MEMORY_SHARED);
1864 } else {
1865 purgeRecords(NULL, ldst->src(0).getFile());
1866 }
1867 } else
1868 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
1869 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1870 }
1871 continue;
1872 }
1873 if (ldst->getPredicate()) // TODO: handle predicated ld/st
1874 continue;
1875
1876 if (isLoad) {
1877 DataFile file = ldst->src(0).getFile();
1878
1879 // if ld l[]/g[] look for previous store to eliminate the reload
1880 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
1881 // TODO: shared memory ?
1882 rec = findRecord(ldst, false, isAdjacent);
1883 if (rec && !isAdjacent)
1884 keep = !replaceLdFromSt(ldst, rec);
1885 }
1886
1887 // or look for ld from the same location and replace this one
1888 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
1889 if (rec) {
1890 if (!isAdjacent)
1891 keep = !replaceLdFromLd(ldst, rec);
1892 else
1893 // or combine a previous load with this one
1894 keep = !combineLd(rec, ldst);
1895 }
1896 if (keep)
1897 lockStores(ldst);
1898 } else {
1899 rec = findRecord(ldst, false, isAdjacent);
1900 if (rec) {
1901 if (!isAdjacent)
1902 keep = !replaceStFromSt(ldst, rec);
1903 else
1904 keep = !combineSt(rec, ldst);
1905 }
1906 if (keep)
1907 purgeRecords(ldst, DATA_FILE_COUNT);
1908 }
1909 if (keep)
1910 addRecord(ldst);
1911 }
1912 reset();
1913
1914 return true;
1915 }
1916
1917 // =============================================================================
1918
1919 // Turn control flow into predicated instructions (after register allocation !).
1920 // TODO:
1921 // Could move this to before register allocation on NVC0 and also handle nested
1922 // constructs.
1923 class FlatteningPass : public Pass
1924 {
1925 private:
1926 virtual bool visit(BasicBlock *);
1927
1928 bool tryPredicateConditional(BasicBlock *);
1929 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
1930 void tryPropagateBranch(BasicBlock *);
1931 inline bool isConstantCondition(Value *pred);
1932 inline bool mayPredicate(const Instruction *, const Value *pred) const;
1933 inline void removeFlow(Instruction *);
1934 };
1935
1936 bool
1937 FlatteningPass::isConstantCondition(Value *pred)
1938 {
1939 Instruction *insn = pred->getUniqueInsn();
1940 assert(insn);
1941 if (insn->op != OP_SET || insn->srcExists(2))
1942 return false;
1943
1944 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
1945 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
1946 DataFile file;
1947 if (ld) {
1948 if (ld->op != OP_MOV && ld->op != OP_LOAD)
1949 return false;
1950 if (ld->src(0).isIndirect(0))
1951 return false;
1952 file = ld->src(0).getFile();
1953 } else {
1954 file = insn->src(s).getFile();
1955 // catch $r63 on NVC0
1956 if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
1957 file = FILE_IMMEDIATE;
1958 }
1959 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
1960 return false;
1961 }
1962 return true;
1963 }
1964
1965 void
1966 FlatteningPass::removeFlow(Instruction *insn)
1967 {
1968 FlowInstruction *term = insn ? insn->asFlow() : NULL;
1969 if (!term)
1970 return;
1971 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
1972
1973 if (term->op == OP_BRA) {
1974 // TODO: this might get more difficult when we get arbitrary BRAs
1975 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
1976 return;
1977 } else
1978 if (term->op != OP_JOIN)
1979 return;
1980
1981 Value *pred = term->getPredicate();
1982
1983 delete_Instruction(prog, term);
1984
1985 if (pred && pred->refCount() == 0) {
1986 Instruction *pSet = pred->getUniqueInsn();
1987 pred->join->reg.data.id = -1; // deallocate
1988 if (pSet->isDead())
1989 delete_Instruction(prog, pSet);
1990 }
1991 }
1992
1993 void
1994 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
1995 {
1996 for (Instruction *i = bb->getEntry(); i; i = i->next) {
1997 if (i->isNop())
1998 continue;
1999 assert(!i->getPredicate());
2000 i->setPredicate(cc, pred);
2001 }
2002 removeFlow(bb->getExit());
2003 }
2004
2005 bool
2006 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
2007 {
2008 if (insn->isPseudo())
2009 return true;
2010 // TODO: calls where we don't know which registers are modified
2011
2012 if (!prog->getTarget()->mayPredicate(insn, pred))
2013 return false;
   for (int d = 0; insn->defExists(d); ++d)
      if (insn->getDef(d)->equals(pred))
         return false;
   return true;
}

// If we jump to BRA/RET/EXIT, replace the jump with it.
// NOTE: We do not update the CFG here anymore !
//
// TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
//  BB:0
//   @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
//  BB1:
//   bra BB:3
//  BB2:
//   ...
//  BB3:
//   ...
void
FlatteningPass::tryPropagateBranch(BasicBlock *bb)
{
   for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
      BasicBlock *bf = i->asFlow()->target.bb;

      if (bf->getInsnCount() != 1)
         continue;

      FlowInstruction *bra = i->asFlow();
      FlowInstruction *rep = bf->getExit()->asFlow();

      if (!rep || rep->getPredicate())
         continue;
      if (rep->op != OP_BRA &&
          rep->op != OP_JOIN &&
          rep->op != OP_EXIT)
         continue;

      // TODO: If there are multiple branches to @rep, only the first would
      //  be replaced, so only remove them after this pass is done ?
      // Also, need to check all incident blocks for fall-through exits and
      //  add the branch there.
      bra->op = rep->op;
      bra->target.bb = rep->target.bb;
      if (bf->cfg.incidentCount() == 1)
         bf->remove(rep);
   }
}

bool
FlatteningPass::visit(BasicBlock *bb)
{
   if (tryPredicateConditional(bb))
      return true;

   // try to attach join to previous instruction
   Instruction *insn = bb->getExit();
   if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
      insn = insn->prev;
      if (insn && !insn->getPredicate() &&
          !insn->asFlow() &&
          insn->op != OP_TEXBAR &&
          !isTextureOp(insn->op) && // probably just nve4
          !isSurfaceOp(insn->op) && // not confirmed
          insn->op != OP_LINTERP && // probably just nve4
          insn->op != OP_PINTERP && // probably just nve4
          ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
           typeSizeof(insn->dType) <= 4) &&
          !insn->isNop()) {
         insn->join = 1;
         bb->remove(bb->getExit());
         return true;
      }
   }

   tryPropagateBranch(bb);

   return true;
}

bool
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
{
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;
   Instruction *insn;
   unsigned int mask;

   mask = bb->initiatesSimpleConditional();
   if (!mask)
      return false;

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();
   assert(pred);

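   // If both SET operands come from immediates or constant memory, the
   // condition is presumably uniform across threads, so a real branch does
   // not diverge; be less eager to predicate then (heuristic limit).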
   if (isConstantCondition(pred))
      limit = 4;

   Graph::EdgeIterator ei = bb->cfg.outgoing();

   if (mask & 1) {
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
            return false;
      if (nL > limit)
         return false; // too long, do a real branch
   }
   ei.next();

   if (mask & 2) {
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
            return false;
      if (nR > limit)
         return false; // too long, do a real branch
   }

   if (bL)
      predicateInstructions(bL, pred, bb->getExit()->cc);
   if (bR)
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

   if (bb->joinAt) {
      bb->remove(bb->joinAt);
      bb->joinAt = NULL;
   }
   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
   }

   return true;
}

// =============================================================================

// Common subexpression elimination. Stupid O(n^2) implementation.
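//
// For each instruction, the source LValue with the fewest uses is picked and
// that value's use list is scanned for an earlier, equal instruction in the
// same block; instructions without a suitable source are matched against
// per-opcode lists instead. Sweeps repeat until nothing gets replaced.
// E.g. (sketch) a second "$r2 = mul f32 $r0 $r1" in a block is deleted and
// the uses of its def are redirected to the first one's result.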
class LocalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];
};

class GlobalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
Instruction::isActionEqual(const Instruction *that) const
{
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
      return false;
   if (this->cc != that->cc)
      return false;

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
         return false;
   } else
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
         return false;
   } else
   if (this->asFlow()) {
      return false;
   } else {
      if (this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
         return false;
      if (this->postFactor != that->postFactor)
         return false;
   }

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache ||
       this->mask != that->mask)
      return false;

   return true;
}

bool
Instruction::isResultEqual(const Instruction *that) const
{
   unsigned int d, s;

   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)
      return false;

   if (!isActionEqual(that))
      return false;

   if (this->predSrc != that->predSrc)
      return false;

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
         return false;
   }
   if (that->defExists(d))
      return false;

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
         return false;
      if (this->src(s).mod != that->src(s).mod)
         return false;
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
         return false;
   }
   if (that->srcExists(s))
      return false;

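   // Two loads only yield the same result if nothing can write to the
   // address in between, so restrict this to memory spaces the shader
   // cannot store to.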
   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src(0).getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
         return true;
      default:
         return false;
      }
   }

   return true;
}

// pull common expressions from the different incoming blocks through their phi
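//
// Illustrative sketch (pseudo-IR): with both predecessors computing the same
// value,
//    BB:1: $r0 = add f32 $a $b      BB:2: $r1 = add f32 $a $b
//    BB:3: $r2 = phi $r0 $r1
// the phi is deleted and the one surviving add is moved into the join block:
//    BB:3: $r2 = add f32 $a $b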
bool
GlobalCSE::visit(BasicBlock *bb)
{
   Instruction *phi, *next, *ik;
   int s;

   // TODO: maybe do this with OP_UNION, too

   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      next = phi->next;
      if (phi->getSrc(0)->refCount() > 1)
         continue;
      ik = phi->getSrc(0)->getInsn();
      if (!ik)
         continue; // probably a function input
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
            break;
         if (!phi->getSrc(s)->getInsn() ||
             !phi->getSrc(s)->getInsn()->isResultEqual(ik))
            break;
      }
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         ik->bb->remove(ik);
         if (!entry || entry->op != OP_JOIN)
            bb->insertHead(ik);
         else
            bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
      }
   }

   return true;
}

bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())
      return false;

   if (!old->isResultEqual(i))
      return false;

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}

// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
{
public:
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};
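
// buryAll runs the elimination to a fixed point: deleting an instruction can
// drop the last use of its sources' defining instructions, making those dead
// in turn.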

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *next;

   for (Instruction *i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB)
            i->setDef(0, NULL);
      }
   }
   return true;
}

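// Illustrative sketch (pseudo-IR): for a vec4 load where only the 1st, 3rd
// and 4th components are still referenced,
//    ld v4 f32 { $r0 $r1 $r2 $r3 } c0[0x10]
// the dead middle component is cut out by splitting into two smaller loads:
//    ld f32 $r0 c0[0x10]
//    ld v2 f32 { $r2 $r3 } c0[0x18]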
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      }
   }

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                    \
   if (level >= (l)) {                       \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)  \
         INFO("PEEPHOLE: %s\n", #n);         \
      n pass;                                \
      if (!pass.f(this))                     \
         return false;                       \
   }
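
// E.g. RUN_PASS(2, GlobalCSE, run) instantiates a GlobalCSE pass and invokes
// its run() only when the requested optimization level is at least 2.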

bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   return true;
}

} // namespace nv50_ir