nv50/ir: optimize imul/imad to xmads
[mesa.git] / src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
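// A minimal before/after sketch (illustrative IR only, not real output):
//   mov u32 %r1 %r0
//   add u32 %r2 %r1 %r3      -->   add u32 %r2 %r0 %r3
// The MOV's def is rewritten to its source, after which the MOV itself is
// dead and gets cleaned up.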
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
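// Illustrative sketch (not real output): for a 64-bit value
//   split u32 { %r1, %r2 } %r0(u64)
//   merge u64 %r3 %r1 %r2
// every use of %r3 can simply refer to %r0 again, and the MERGE is deleted.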
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative) {
      if (insn->op != OP_SET && insn->op != OP_SLCT &&
          insn->op != OP_SUB && insn->op != OP_XMAD)
         return;
      // XMAD is only commutative if neither the CBCC mode nor the MRG flag
      // is set.
      if (insn->op == OP_XMAD &&
          (insn->subOp & NV50_IR_SUBOP_XMAD_CMODE_MASK) == NV50_IR_SUBOP_XMAD_CBCC)
         return;
      if (insn->op == OP_XMAD && (insn->subOp & NV50_IR_SUBOP_XMAD_MRG))
         return;
   }
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alphatesting; we can't reverse its
   // arguments as that would confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way,
   // optimistically, the instruction computing it can eventually be removed.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   } else
   if (insn->op == OP_XMAD) {
      // swap h1 flags
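      // Each XMAD source has an H1 flag that selects its high 16 bits; since
      // the operands trade places, the H1 bit of src0 and the H1 bit of src1
      // must trade places as well.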
      uint16_t h1 = (insn->subOp >> 1 & NV50_IR_SUBOP_XMAD_H1(0)) |
                    (insn->subOp << 1 & NV50_IR_SUBOP_XMAD_H1(1));
      insn->subOp = (insn->subOp & ~NV50_IR_SUBOP_XMAD_H1_MASK) | h1;
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   BuildUtil bld;
};

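// Fold constant address arithmetic into the indirect access itself. A rough
// sketch of the intent (illustrative IR, not real output):
//   add u32 $a1 $a0 0x10
//   ld u32 %r0 c0[$a1]      -->   ld u32 %r0 c0[$a0+0x10]
// provided the target can encode the resulting offset.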
bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      bld.setPosition(i, false);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SHLADD) {
            if (!insn->src(2).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, bld.mkOp2v(
               OP_SHL, TYPE_U32, bld.getSSA(), insn->getSrc(0), insn->getSrc(1)));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn)
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
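      // src1 packs the bitfield descriptor: extraction offset in its low
      // byte, field width in the next byte (see the decoding just below).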
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. It might still not be, as there may be other restrictions,
      // so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV;
      if (i->saturate)
         unary(i, *i->getSrc(0)->asImm());
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const Target *target = prog->getTarget();
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size;
      uint8_t bitsize = size * 8;
      uint32_t mask = (1ULL << bitsize) - 1;
      assert(bitsize <= 32);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= bitsize;
      }
      delete_Instruction(prog, i);
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // mul_high(x, 1) yields the sign of x (0 or -1), i.e. a compare
            // against 0.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
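            // (mul_high(x, 2^k) == x >> (32 - k), with an arithmetic shift
            // for signed types.)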
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_FMA:
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      } else
      if (s == 1 && !imm0.isNegative() && imm0.isPow2() &&
          !isFloatType(i->dType) &&
          target->isOpSupported(OP_SHLADD, i->dType) &&
          !i->subOp) {
         i->op = OP_SHLADD;
         imm0.applyLog2();
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      /* fallthrough */
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
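         // Unsigned division by a constant: the round-up magic-number method
         // (in the style of Hacker's Delight). With l = ceil(log2(d)) and
         //   m = (2^32 * (2^l - d)) / d + 1,
         // the quotient is computed below as
         //   t = mulhi(x, m);  q = (t + ((x - t) >> 1)) >> (l - 1)
         // where the (x - t) >> 1 step keeps the sum within 32 bits.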
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
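         // Signed division by a constant, again via a magic number:
         //   m = 2^(32+l-1) / |d| + 1 - 2^32
         //   q = (mulhi(x, m) + x) >> (l - 1), corrected by +1 for negative x
         // (the SET below produces 0/-1, which the SUB folds back in), and
         // finally negated if d < 0.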
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
      if (s == 1 && imm0.isPow2()) {
         bld.setPosition(i, false);
         if (i->sType == TYPE_U32) {
            i->op = OP_AND;
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
         } else if (i->sType == TYPE_S32) {
            // Do it on the absolute value of the input, and then restore the
            // sign. The only odd case is MIN_INT, but that should work out
            // as well, since MIN_INT mod any power of 2 is 0.
            //
            // Technically we don't have to do any of this since MOD is
            // undefined with negative arguments in GLSL, but this seems like
            // the nice thing to do.
            Value *abs = bld.mkOp1v(OP_ABS, TYPE_S32, bld.getSSA(), i->getSrc(0));
            Value *neg, *v1, *v2;
            bld.mkCmp(OP_SET, CC_LT, TYPE_S32,
                      (neg = bld.getSSA(1, prog->getTarget()->nativeFile(FILE_PREDICATE))),
                      TYPE_S32, i->getSrc(0), bld.loadImm(NULL, 0));
            Value *mod = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), abs,
                                    bld.loadImm(NULL, imm0.reg.data.u32 - 1));
            bld.mkOp1(OP_NEG, TYPE_S32, (v1 = bld.getSSA()), mod)
               ->setPredicate(CC_P, neg);
            bld.mkOp1(OP_MOV, TYPE_S32, (v2 = bld.getSSA()), mod)
               ->setPredicate(CC_NOT_P, neg);
            newi = bld.mkOp2(OP_UNION, TYPE_S32, i->getDef(0), v1, v2);

            delete_Instruction(prog, i);
         }
      } else if (s == 1) {
         // In this case, we still want the optimized lowering that we get
         // from having division by an immediate.
         //
         // a % b == a - (a/b) * b
         bld.setPosition(i, false);
         Value *div = bld.mkOp2v(OP_DIV, i->sType, bld.getSSA(),
                                 i->getSrc(0), i->getSrc(1));
         newi = bld.mkOp2(OP_ADD, i->sType, i->getDef(0), i->getSrc(0),
                          bld.mkOp2v(OP_MUL, i->sType, bld.getSSA(), div, i->getSrc(1)));
         // TODO: Check that target supports this. In this case, we know that
         // all backends do.
         newi->src(1).mod = Modifier(NV50_IR_MOD_NEG);

         delete_Instruction(prog, i);
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
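      // For instance (an illustrative sketch, not real output):
      //   set u32 %b lt f32 a b     (produces 0 or ~0)
      //   set u32 %r ne u32 %b 0x0
      // collapses into a single:
      //   set u32 %r lt f32 a b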
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return;
         if (imm0.reg.data.f32 != 1.0)
            return;
         if (cmp->dType != TYPE_U32)
            return;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      } else if (src->op == OP_SHL &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(~imm0.reg.data.u32 + 1) &&
                 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return;
         if (si->src(!adds).mod != Modifier(0))
            return;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b) we can
         // propagate the ADD into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return;
   }

   // This can get left behind by some of the optimizations which simplify
   // saturatable values.
   if (newi->op == OP_MOV && newi->saturate) {
      ImmediateValue tmp;
      newi->saturate = 0;
      newi->op = OP_SAT;
      if (newi->src(0).getImmediate(tmp))
         unary(newi, tmp);
   }

   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
// EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);
   void handleEXTBF_RDSV(Instruction *);

   BuildUtil bld;
};

void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet; if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
1743 abs->moveSources(1, 2); // move sources >=1 up by 2
1744 abs->op = OP_SAD;
1745 abs->setType(sub->dType);
1746 abs->setSrc(0, src0);
1747 abs->setSrc(1, src1);
1748 bld.setPosition(abs, false);
1749 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1750 }
1751
1752 bool
1753 AlgebraicOpt::handleADD(Instruction *add)
1754 {
1755 Value *src0 = add->getSrc(0);
1756 Value *src1 = add->getSrc(1);
1757
1758 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1759 return false;
1760
1761 bool changed = false;
1762 // we can't optimize to MAD if the add is precise
1763 if (!add->precise && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1764 changed = tryADDToMADOrSAD(add, OP_MAD);
1765 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1766 changed = tryADDToMADOrSAD(add, OP_SAD);
1767 return changed;
1768 }
1769
1770 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1771 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1772 bool
1773 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1774 {
1775 Value *src0 = add->getSrc(0);
1776 Value *src1 = add->getSrc(1);
1777 Value *src;
1778 int s;
1779 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1780 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1781 Modifier mod[4];
1782
1783 if (src0->refCount() == 1 &&
1784 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1785 s = 0;
1786 else
1787 if (src1->refCount() == 1 &&
1788 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1789 s = 1;
1790 else
1791 return false;
1792
1793 src = add->getSrc(s);
1794
1795 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1796 return false;
1797
1798 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1799 src->getInsn()->dnz || src->getInsn()->precise)
1800 return false;
1801
1802 if (toOp == OP_SAD) {
1803 ImmediateValue imm;
1804 if (!src->getInsn()->src(2).getImmediate(imm))
1805 return false;
1806 if (!imm.isInteger(0))
1807 return false;
1808 }
1809
1810 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1811 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1812 return false;
1813
1814 mod[0] = add->src(0).mod;
1815 mod[1] = add->src(1).mod;
1816 mod[2] = src->getUniqueInsn()->src(0).mod;
1817 mod[3] = src->getUniqueInsn()->src(1).mod;
1818
1819 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1820 return false;
1821
1822 add->op = toOp;
1823 add->subOp = src->getInsn()->subOp; // potentially mul-high
1824 add->dnz = src->getInsn()->dnz;
1825 add->dType = src->getInsn()->dType; // sign matters for imad hi
1826 add->sType = src->getInsn()->sType;
1827
1828 add->setSrc(2, add->src(s ? 0 : 1));
1829
1830 add->setSrc(0, src->getInsn()->getSrc(0));
1831 add->src(0).mod = mod[2] ^ mod[s];
1832 add->setSrc(1, src->getInsn()->getSrc(1));
1833 add->src(1).mod = mod[3];
1834
1835 return true;
1836 }
1837
1838 void
1839 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1840 {
1841 Value *src0 = minmax->getSrc(0);
1842 Value *src1 = minmax->getSrc(1);
1843
1844 if (src0 != src1 || src0->reg.file != FILE_GPR)
1845 return;
1846 if (minmax->src(0).mod == minmax->src(1).mod) {
1847 if (minmax->def(0).mayReplace(minmax->src(0))) {
1848 minmax->def(0).replace(minmax->src(0), false);
1849 minmax->bb->remove(minmax);
1850 } else {
1851 minmax->op = OP_CVT;
1852 minmax->setSrc(1, NULL);
1853 }
1854 } else {
1855 // TODO:
1856 // min(x, -x) = -abs(x)
1857 // min(x, -abs(x)) = -abs(x)
1858 // min(x, abs(x)) = x
1859 // max(x, -abs(x)) = x
1860 // max(x, abs(x)) = abs(x)
1861 // max(x, -x) = abs(x)
1862 }
1863 }
1864
1865 // rcp(rcp(a)) = a
1866 // rcp(sqrt(a)) = rsq(a)
1867 void
1868 AlgebraicOpt::handleRCP(Instruction *rcp)
1869 {
1870 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1871
1872 if (!si)
1873 return;
1874
1875 if (si->op == OP_RCP) {
1876 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1877 rcp->op = mod.getOp();
1878 rcp->setSrc(0, si->getSrc(0));
1879 } else if (si->op == OP_SQRT) {
1880 rcp->op = OP_RSQ;
1881 rcp->setSrc(0, si->getSrc(0));
1882 rcp->src(0).mod = rcp->src(0).mod * si->src(0).mod;
1883 }
1884 }
1885
1886 void
1887 AlgebraicOpt::handleSLCT(Instruction *slct)
1888 {
1889 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1890 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1891 slct->setSrc(0, slct->getSrc(1));
1892 } else
1893 if (slct->getSrc(0) != slct->getSrc(1)) {
1894 return;
1895 }
1896 slct->op = OP_MOV;
1897 slct->setSrc(1, NULL);
1898 slct->setSrc(2, NULL);
1899 }
1900
1901 void
1902 AlgebraicOpt::handleLOGOP(Instruction *logop)
1903 {
1904 Value *src0 = logop->getSrc(0);
1905 Value *src1 = logop->getSrc(1);
1906
1907 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1908 return;
1909
1910 if (src0 == src1) {
1911 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1912 logop->def(0).mayReplace(logop->src(0))) {
1913 logop->def(0).replace(logop->src(0), false);
1914 delete_Instruction(prog, logop);
1915 }
1916 } else {
1917 // try AND(SET, SET) -> SET_AND(SET)
1918 Instruction *set0 = src0->getInsn();
1919 Instruction *set1 = src1->getInsn();
1920
1921 if (!set0 || set0->fixed || !set1 || set1->fixed)
1922 return;
1923 if (set1->op != OP_SET) {
1924 Instruction *xchg = set0;
1925 set0 = set1;
1926 set1 = xchg;
1927 if (set1->op != OP_SET)
1928 return;
1929 }
1930 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
1931 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
1932 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
1933 return;
1934 if (set0->op != OP_SET &&
1935 set0->op != OP_SET_AND &&
1936 set0->op != OP_SET_OR &&
1937 set0->op != OP_SET_XOR)
1938 return;
1939 if (set0->getDef(0)->refCount() > 1 &&
1940 set1->getDef(0)->refCount() > 1)
1941 return;
1942 if (set0->getPredicate() || set1->getPredicate())
1943 return;
1944 // check that they don't source each other
1945 for (int s = 0; s < 2; ++s)
1946 if (set0->getSrc(s) == set1->getDef(0) ||
1947 set1->getSrc(s) == set0->getDef(0))
1948 return;
1949
1950 set0 = cloneForward(func, set0);
1951 set1 = cloneShallow(func, set1);
1952 logop->bb->insertAfter(logop, set1);
1953 logop->bb->insertAfter(logop, set0);
1954
1955 set0->dType = TYPE_U8;
1956 set0->getDef(0)->reg.file = FILE_PREDICATE;
1957 set0->getDef(0)->reg.size = 1;
1958 set1->setSrc(2, set0->getDef(0));
1959 set1->op = redOp;
1960 set1->setDef(0, logop->getDef(0));
1961 delete_Instruction(prog, logop);
1962 }
1963 }
1964
1965 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1966 // nv50:
1967 // F2I(NEG(I2F(ABS(SET))))
1968 void
1969 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
1970 {
1971 Instruction *insn = cvt->getSrc(0)->getInsn();
1972 if (cvt->sType != TYPE_F32 ||
1973 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
1974 return;
1975 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1976 return;
1977 if (insn->src(0).mod != Modifier(0))
1978 return;
1979 insn = insn->getSrc(0)->getInsn();
1980
1981 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
1982 if (insn && insn->op == OP_CVT &&
1983 insn->dType == TYPE_F32 &&
1984 insn->sType == TYPE_S32) {
1985 insn = insn->getSrc(0)->getInsn();
1986 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
1987 insn->src(0).mod)
1988 return;
1989 insn = insn->getSrc(0)->getInsn();
1990 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
1991 return;
1992 } else
1993 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
1994 return;
1995 }
1996
1997 Instruction *bset = cloneShallow(func, insn);
1998 bset->dType = TYPE_U32;
1999 bset->setDef(0, cvt->getDef(0));
2000 cvt->bb->insertAfter(cvt, bset);
2001 delete_Instruction(prog, cvt);
2002 }
2003
2004 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
2005 // does a type conversion, this becomes trickier as there might be range
2006 // changes/etc. We could handle those in theory as long as the range was being
2007 // reduced or kept the same.
2008 void
2009 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
2010 {
2011 Instruction *insn = cvt->getSrc(0)->getInsn();
// guard against a source without a defining instruction (e.g. a function
// input), as handleCVT_NEG() above does
if (!insn)
return;
2012 RoundMode rnd = insn->rnd;
2013
2014 if (insn->saturate ||
2015 insn->subOp ||
2016 insn->dType != insn->sType ||
2017 insn->dType != cvt->sType)
2018 return;
2019
2020 switch (insn->op) {
2021 case OP_CEIL:
2022 rnd = ROUND_PI;
2023 break;
2024 case OP_FLOOR:
2025 rnd = ROUND_MI;
2026 break;
2027 case OP_TRUNC:
2028 rnd = ROUND_ZI;
2029 break;
2030 case OP_CVT:
2031 break;
2032 default:
2033 return;
2034 }
2035
2036 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
2037 rnd = (RoundMode)(rnd & 3);
2038
2039 cvt->rnd = rnd;
2040 cvt->setSrc(0, insn->getSrc(0));
2041 cvt->src(0).mod *= insn->src(0).mod;
2042 cvt->sType = insn->sType;
2043 }
2044
2045 // Some shaders extract packed bytes out of words and convert them to
2046 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
2047 // nv50 for word sizes.
2048 //
2049 // CVT(EXTBF(x, byte/word))
2050 // CVT(AND(bytemask, x))
2051 // CVT(AND(bytemask, SHR(x, 8/16/24)))
2052 // CVT(SHR(x, 16/24))
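//
// The extraction is encoded by narrowing sType to U8/S8/U16/S16 and storing
// the byte index of the field in subOp (offset / 8).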
2053 void
2054 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
2055 {
2056 Instruction *insn = cvt->getSrc(0)->getInsn();
2057 ImmediateValue imm;
2058 Value *arg = NULL;
2059 unsigned width, offset;
2060 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
2061 return;
2062 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
2063 width = (imm.reg.data.u32 >> 8) & 0xff;
2064 offset = imm.reg.data.u32 & 0xff;
2065 arg = insn->getSrc(0);
2066
2067 if (width != 8 && width != 16)
2068 return;
2069 if (width == 8 && offset & 0x7)
2070 return;
2071 if (width == 16 && offset & 0xf)
2072 return;
2073 } else if (insn->op == OP_AND) {
2074 int s;
2075 if (insn->src(0).getImmediate(imm))
2076 s = 0;
2077 else if (insn->src(1).getImmediate(imm))
2078 s = 1;
2079 else
2080 return;
2081
2082 if (imm.reg.data.u32 == 0xff)
2083 width = 8;
2084 else if (imm.reg.data.u32 == 0xffff)
2085 width = 16;
2086 else
2087 return;
2088
2089 arg = insn->getSrc(!s);
2090 Instruction *shift = arg->getInsn();
2091 offset = 0;
2092 if (shift && shift->op == OP_SHR &&
2093 shift->sType == cvt->sType &&
2094 shift->src(1).getImmediate(imm) &&
2095 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2096 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2097 arg = shift->getSrc(0);
2098 offset = imm.reg.data.u32;
2099 }
2100 // We just AND'd the high bits away, which means this is effectively an
2101 // unsigned value.
2102 cvt->sType = TYPE_U32;
2103 } else if (insn->op == OP_SHR &&
2104 insn->sType == cvt->sType &&
2105 insn->src(1).getImmediate(imm)) {
2106 arg = insn->getSrc(0);
2107 if (imm.reg.data.u32 == 24) {
2108 width = 8;
2109 offset = 24;
2110 } else if (imm.reg.data.u32 == 16) {
2111 width = 16;
2112 offset = 16;
2113 } else {
2114 return;
2115 }
2116 }
2117
2118 if (!arg)
2119 return;
2120
2121 // Irrespective of what came earlier, we can undo a shift on the argument
2122 // by adjusting the offset.
2123 Instruction *shift = arg->getInsn();
2124 if (shift && shift->op == OP_SHL &&
2125 shift->src(1).getImmediate(imm) &&
2126 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2127 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2128 imm.reg.data.u32 <= offset) {
2129 arg = shift->getSrc(0);
2130 offset -= imm.reg.data.u32;
2131 }
2132
2133 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2134 // annoying to detect them.
2135
2136 if (width == 8) {
2137 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2138 } else {
2139 assert(width == 16);
2140 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2141 }
2142 cvt->setSrc(0, arg);
2143 cvt->subOp = offset >> 3;
2144 }
2145
2146 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm
// (if the combined immediate fits in a signed 6-bit value, i.e. [-32, 31])
2147 void
2148 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2149 {
2150 ImmediateValue imm;
2151 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2152 int s;
2153 Instruction *add;
2154
2155 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2156
2157 // look for ADD (TODO: only count references by non-SUCLAMP)
2158 if (insn->getSrc(0)->refCount() > 1)
2159 return;
2160 add = insn->getSrc(0)->getInsn();
2161 if (!add || add->op != OP_ADD ||
2162 (add->dType != TYPE_U32 &&
2163 add->dType != TYPE_S32))
2164 return;
2165
2166 // look for immediate
2167 for (s = 0; s < 2; ++s)
2168 if (add->src(s).getImmediate(imm))
2169 break;
2170 if (s >= 2)
2171 return;
2172 s = s ? 0 : 1;
2173 // determine if immediate fits
2174 val += imm.reg.data.s32;
2175 if (val > 31 || val < -32)
2176 return;
2177 // determine if other addend fits
2178 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2179 return;
2180
2181 bld.setPosition(insn, false); // make sure bld is initialized
2182 // replace sources
2183 insn->setSrc(2, bld.mkImm(val));
2184 insn->setSrc(0, add->getSrc(s));
2185 }
2186
2187 // NEG(AND(SET, 1)) -> SET
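// An integer SET produces 0 or -1 (all bits set); masking with 1 yields
// 0 or 1, and negating that restores 0 or -1, i.e. the original SET result.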
2188 void
2189 AlgebraicOpt::handleNEG(Instruction *i) {
2190 Instruction *src = i->getSrc(0)->getInsn();
2191 ImmediateValue imm;
2192 int b;
2193
2194 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2195 return;
2196
2197 if (src->src(0).getImmediate(imm))
2198 b = 1;
2199 else if (src->src(1).getImmediate(imm))
2200 b = 0;
2201 else
2202 return;
2203
2204 if (!imm.isInteger(1))
2205 return;
2206
2207 Instruction *set = src->getSrc(b)->getInsn();
2208 if ((set->op == OP_SET || set->op == OP_SET_AND ||
2209 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2210 !isFloatType(set->dType)) {
2211 i->def(0).replace(set->getDef(0), false);
2212 }
2213 }
2214
2215 // EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
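// SV_COMBINED_TID packs tid.x into bits [15:0], tid.y into [25:16] and tid.z
// into [31:26]; the immediates below are EXTBF (width << 8 | offset) fields
// selecting exactly one of those components.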
2216 void
2217 AlgebraicOpt::handleEXTBF_RDSV(Instruction *i)
2218 {
2219 Instruction *rdsv = i->getSrc(0)->getUniqueInsn();
2220 if (!rdsv || rdsv->op != OP_RDSV ||
2221 rdsv->getSrc(0)->asSym()->reg.data.sv.sv != SV_COMBINED_TID)
2222 return;
2223 // Avoid creating more RDSV instructions
2224 if (rdsv->getDef(0)->refCount() > 1)
2225 return;
2226
2227 ImmediateValue imm;
2228 if (!i->src(1).getImmediate(imm))
2229 return;
2230
2231 int index;
2232 if (imm.isInteger(0x1000))
2233 index = 0;
2234 else
2235 if (imm.isInteger(0x0a10))
2236 index = 1;
2237 else
2238 if (imm.isInteger(0x061a))
2239 index = 2;
2240 else
2241 return;
2242
2243 bld.setPosition(i, false);
2244
2245 i->op = OP_RDSV;
2246 i->setSrc(0, bld.mkSysVal(SV_TID, index));
2247 i->setSrc(1, NULL);
2248 }
2249
2250 bool
2251 AlgebraicOpt::visit(BasicBlock *bb)
2252 {
2253 Instruction *next;
2254 for (Instruction *i = bb->getEntry(); i; i = next) {
2255 next = i->next;
2256 switch (i->op) {
2257 case OP_ABS:
2258 handleABS(i);
2259 break;
2260 case OP_ADD:
2261 handleADD(i);
2262 break;
2263 case OP_RCP:
2264 handleRCP(i);
2265 break;
2266 case OP_MIN:
2267 case OP_MAX:
2268 handleMINMAX(i);
2269 break;
2270 case OP_SLCT:
2271 handleSLCT(i);
2272 break;
2273 case OP_AND:
2274 case OP_OR:
2275 case OP_XOR:
2276 handleLOGOP(i);
2277 break;
2278 case OP_CVT:
2279 handleCVT_NEG(i);
2280 handleCVT_CVT(i);
2281 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2282 handleCVT_EXTBF(i);
2283 break;
2284 case OP_SUCLAMP:
2285 handleSUCLAMP(i);
2286 break;
2287 case OP_NEG:
2288 handleNEG(i);
2289 break;
2290 case OP_EXTBF:
2291 handleEXTBF_RDSV(i);
2292 break;
2293 default:
2294 break;
2295 }
2296 }
2297
2298 return true;
2299 }
2300
2301 // =============================================================================
2302
2303 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2304 // MUL(a, b) -> a few XMADs
2305 // MAD/FMA(a, b, c) -> a few XMADs
2306 class LateAlgebraicOpt : public Pass
2307 {
2308 private:
2309 virtual bool visit(Instruction *);
2310
2311 void handleADD(Instruction *);
2312 void handleMULMAD(Instruction *);
2313 bool tryADDToSHLADD(Instruction *);
2314
2315 BuildUtil bld;
2316 };
2317
2318 void
2319 LateAlgebraicOpt::handleADD(Instruction *add)
2320 {
2321 Value *src0 = add->getSrc(0);
2322 Value *src1 = add->getSrc(1);
2323
2324 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2325 return;
2326
2327 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2328 tryADDToSHLADD(add);
2329 }
2330
2331 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2332 bool
2333 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2334 {
2335 Value *src0 = add->getSrc(0);
2336 Value *src1 = add->getSrc(1);
2337 ImmediateValue imm;
2338 Instruction *shl;
2339 Value *src;
2340 int s;
2341
2342 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2343 || isFloatType(add->dType))
2344 return false;
2345
2346 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2347 s = 0;
2348 else
2349 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2350 s = 1;
2351 else
2352 return false;
2353
2354 src = add->getSrc(s);
2355 shl = src->getUniqueInsn();
2356
2357 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2358 return false;
2359
2360 if (!shl->src(1).getImmediate(imm))
2361 return false;
2362
2363 add->op = OP_SHLADD;
2364 add->setSrc(2, add->src(!s));
2365 // SHL can't have any modifiers, but the ADD source may have had
2366 // one. Preserve it.
2367 add->setSrc(0, shl->getSrc(0));
2368 if (s == 1)
2369 add->src(0).mod = add->src(1).mod;
2370 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2371 add->src(1).mod = Modifier(0);
2372
2373 return true;
2374 }
2375
2376 // MUL(a, b) -> a few XMADs
2377 // MAD/FMA(a, b, c) -> a few XMADs
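//
// A 32-bit multiply is decomposed into 16x16-bit XMAD partial products; the
// hi(a)*hi(b) term vanishes mod 2^32:
//    a * b + c = lo(a)*lo(b) + c + ((lo(a)*hi(b) + hi(a)*lo(b)) << 16)
// tmp0 accumulates the low product (plus c); tmp1 computes one cross product
// and, via MRG, carries the other multiplicand half in its high half; the
// final XMAD.PSL.CBCC then adds both cross products shifted left by 16.
// (Rough sketch; the exact half-word selection is encoded in the subOps.)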
2378 void
2379 LateAlgebraicOpt::handleMULMAD(Instruction *i)
2380 {
2381 // TODO: handle NV50_IR_SUBOP_MUL_HIGH
2382 if (!prog->getTarget()->isOpSupported(OP_XMAD, TYPE_U32))
2383 return;
2384 if (isFloatType(i->dType) || typeSizeof(i->dType) != 4)
2385 return;
2386 if (i->subOp || i->usesFlags() || i->flagsDef >= 0)
2387 return;
2388
2389 assert(!i->src(0).mod);
2390 assert(!i->src(1).mod);
2391 assert(i->op == OP_MUL ? 1 : !i->src(2).mod);
2392
2393 bld.setPosition(i, false);
2394
2395 Value *a = i->getSrc(0);
2396 Value *b = i->getSrc(1);
2397 Value *c = i->op == OP_MUL ? bld.mkImm(0) : i->getSrc(2);
2398
2399 Value *tmp0 = bld.getSSA();
2400 Value *tmp1 = bld.getSSA();
2401
2402 Instruction *insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp0, b, a, c);
2403 insn->setPredicate(i->cc, i->getPredicate());
2404
2405 insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp1, b, a, bld.mkImm(0));
2406 insn->setPredicate(i->cc, i->getPredicate());
2407 insn->subOp = NV50_IR_SUBOP_XMAD_MRG | NV50_IR_SUBOP_XMAD_H1(1);
2408
2409 Value *pred = i->getPredicate();
2410 i->setPredicate(i->cc, NULL);
2411
2412 i->op = OP_XMAD;
2413 i->setSrc(0, b);
2414 i->setSrc(1, tmp1);
2415 i->setSrc(2, tmp0);
2416 i->subOp = NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_CBCC;
2417 i->subOp |= NV50_IR_SUBOP_XMAD_H1(0) | NV50_IR_SUBOP_XMAD_H1(1);
2418
2419 i->setPredicate(i->cc, pred);
2420 }
2421
2422 bool
2423 LateAlgebraicOpt::visit(Instruction *i)
2424 {
2425 switch (i->op) {
2426 case OP_ADD:
2427 handleADD(i);
2428 break;
2429 case OP_MUL:
2430 case OP_MAD:
2431 case OP_FMA:
2432 handleMULMAD(i);
2433 break;
2434 default:
2435 break;
2436 }
2437
2438 return true;
2439 }
2440
2441 // =============================================================================
2442
2443 // Split 64-bit MUL and MAD
2444 class Split64BitOpPreRA : public Pass
2445 {
2446 private:
2447 virtual bool visit(BasicBlock *);
2448 void split64MulMad(Function *, Instruction *, DataType);
2449
2450 BuildUtil bld;
2451 };
2452
2453 bool
2454 Split64BitOpPreRA::visit(BasicBlock *bb)
2455 {
2456 Instruction *i, *next;
2457 Modifier mod;
2458
2459 for (i = bb->getEntry(); i; i = next) {
2460 next = i->next;
2461
2462 DataType hTy;
2463 switch (i->dType) {
2464 case TYPE_U64: hTy = TYPE_U32; break;
2465 case TYPE_S64: hTy = TYPE_S32; break;
2466 default:
2467 continue;
2468 }
2469
2470 if (i->op == OP_MAD || i->op == OP_MUL)
2471 split64MulMad(func, i, hTy);
2472 }
2473
2474 return true;
2475 }
2476
2477 void
2478 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2479 {
2480 assert(i->op == OP_MAD || i->op == OP_MUL);
2481 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2482 assert(typeSizeof(hTy) == 4);
2483
2484 bld.setPosition(i, true);
2485
2486 Value *zero = bld.mkImm(0u);
2487 Value *carry = bld.getSSA(1, FILE_FLAGS);
2488
2489 // We want to compute `d = a * b (+ c)?`, where d is a 64-bit value and a, b
2490 // and c are 64-bit (or possibly 32-bit) values, using 32-bit operations. This
2491 // gives the following operations:
2492 // * `d.low = low(a.low * b.low) (+ c.low)?`
2493 // * `d.high = low(a.high * b.low) + low(a.low * b.high)
2494 // + high(a.low * b.low) (+ c.high)?`
2495 //
2496 // To compute the high bits, we can split in the following operations:
2497 // * `tmp1 = low(a.high * b.low) (+ c.high)?`
2498 // * `tmp2 = low(a.low * b.high) + tmp1`
2499 // * `d.high = high(a.low * b.low) + tmp2`
2500 //
2501 // mkSplit put lower bits at index 0 and higher bits at index 1
2502
2503 Value *op1[2];
2504 if (i->getSrc(0)->reg.size == 8)
2505 bld.mkSplit(op1, 4, i->getSrc(0));
2506 else {
2507 op1[0] = i->getSrc(0);
2508 op1[1] = zero;
2509 }
2510 Value *op2[2];
2511 if (i->getSrc(1)->reg.size == 8)
2512 bld.mkSplit(op2, 4, i->getSrc(1));
2513 else {
2514 op2[0] = i->getSrc(1);
2515 op2[1] = zero;
2516 }
2517
2518 Value *op3[2] = { NULL, NULL };
2519 if (i->op == OP_MAD) {
2520 if (i->getSrc(2)->reg.size == 8)
2521 bld.mkSplit(op3, 4, i->getSrc(2));
2522 else {
2523 op3[0] = i->getSrc(2);
2524 op3[1] = zero;
2525 }
2526 }
2527
2528 Value *tmpRes1Hi = bld.getSSA();
2529 if (i->op == OP_MAD)
2530 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2531 else
2532 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2533
2534 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2535
2536 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2537
2538 // If it was a MAD, add the carry from the low bits
2539 // It is not needed if it was a MUL, since we added high(a.low * b.low) to
2540 // d.high
2541 if (i->op == OP_MAD)
2542 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2543 else
2544 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2545
2546 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2547 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2548 if (i->op == OP_MAD)
2549 hiPart3->setFlagsSrc(3, carry);
2550
2551 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2552
2553 delete_Instruction(fn->getProgram(), i);
2554 }
2555
2556 // =============================================================================
2557
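// Rewrite the address symbol of @ldst to @offset; if other instructions
// still reference the symbol, clone it first so they keep their old offset.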
2558 static inline void
2559 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2560 {
2561 if (offset != ldst->getSrc(0)->reg.data.offset) {
2562 if (ldst->getSrc(0)->refCount() > 1)
2563 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2564 ldst->getSrc(0)->reg.data.offset = offset;
2565 }
2566 }
2567
2568 // Combine loads and stores, forward stores to loads where possible.
2569 class MemoryOpt : public Pass
2570 {
2571 private:
2572 class Record
2573 {
2574 public:
2575 Record *next;
2576 Instruction *insn;
2577 const Value *rel[2];
2578 const Value *base;
2579 int32_t offset;
2580 int8_t fileIndex;
2581 uint8_t size;
2582 bool locked;
2583 Record *prev;
2584
2585 bool overlaps(const Instruction *ldst) const;
2586
2587 inline void link(Record **);
2588 inline void unlink(Record **);
2589 inline void set(const Instruction *ldst);
2590 };
2591
2592 public:
2593 MemoryOpt();
2594
2595 Record *loads[DATA_FILE_COUNT];
2596 Record *stores[DATA_FILE_COUNT];
2597
2598 MemoryPool recordPool;
2599
2600 private:
2601 virtual bool visit(BasicBlock *);
2602 bool runOpt(BasicBlock *);
2603
2604 Record **getList(const Instruction *);
2605
2606 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2607
2608 // merge @insn into load/store instruction from @rec
2609 bool combineLd(Record *rec, Instruction *ld);
2610 bool combineSt(Record *rec, Instruction *st);
2611
2612 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2613 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2614 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2615
2616 void addRecord(Instruction *ldst);
2617 void purgeRecords(Instruction *const st, DataFile);
2618 void lockStores(Instruction *const ld);
2619 void reset();
2620
2621 private:
2622 Record *prevRecord;
2623 };
2624
2625 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2626 {
2627 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2628 loads[i] = NULL;
2629 stores[i] = NULL;
2630 }
2631 prevRecord = NULL;
2632 }
2633
2634 void
2635 MemoryOpt::reset()
2636 {
2637 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2638 Record *it, *next;
2639 for (it = loads[i]; it; it = next) {
2640 next = it->next;
2641 recordPool.release(it);
2642 }
2643 loads[i] = NULL;
2644 for (it = stores[i]; it; it = next) {
2645 next = it->next;
2646 recordPool.release(it);
2647 }
2648 stores[i] = NULL;
2649 }
2650 }
2651
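// Merge the load @ld into the adjacent load held in @rec, e.g. two
// consecutive 32-bit loads from c0[0x10] and c0[0x14] become one 64-bit load
// (subject to alignment and target support for the combined access size).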
2652 bool
2653 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2654 {
2655 int32_t offRc = rec->offset;
2656 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2657 int sizeRc = rec->size;
2658 int sizeLd = typeSizeof(ld->dType);
2659 int size = sizeRc + sizeLd;
2660 int d, j;
2661
2662 if (!prog->getTarget()->
2663 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2664 return false;
2665 // no unaligned loads
2666 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2667 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2668 return false;
2669 // for compute indirect loads are not guaranteed to be aligned
2670 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2671 return false;
2672
2673 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2674
2675 // lock any stores that overlap with the load being merged into the
2676 // existing record.
2677 lockStores(ld);
2678
2679 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2680
2681 if (offLd < offRc) {
2682 int sz;
2683 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2684 // d: nr of definitions in ld
2685 // j: nr of definitions in rec->insn, move:
2686 for (d = d + j - 1; j > 0; --j, --d)
2687 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2688
2689 if (rec->insn->getSrc(0)->refCount() > 1)
2690 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2691 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2692
2693 d = 0;
2694 } else {
2695 d = j;
2696 }
2697 // move definitions of @ld to @rec->insn
2698 for (j = 0; sizeLd; ++j, ++d) {
2699 sizeLd -= ld->getDef(j)->reg.size;
2700 rec->insn->setDef(d, ld->getDef(j));
2701 }
2702
2703 rec->size = size;
2704 rec->insn->getSrc(0)->reg.size = size;
2705 rec->insn->setType(typeOfSize(size));
2706
2707 delete_Instruction(prog, ld);
2708
2709 return true;
2710 }
2711
2712 bool
2713 MemoryOpt::combineSt(Record *rec, Instruction *st)
2714 {
2715 int32_t offRc = rec->offset;
2716 int32_t offSt = st->getSrc(0)->reg.data.offset;
2717 int sizeRc = rec->size;
2718 int sizeSt = typeSizeof(st->dType);
2719 int s = sizeSt / 4;
2720 int size = sizeRc + sizeSt;
2721 int j, k;
2722 Value *src[4]; // no modifiers in ValueRef allowed for st
2723 Value *extra[3];
2724
2725 if (!prog->getTarget()->
2726 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2727 return false;
2728 // no unaligned stores
2729 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2730 return false;
2731 // for compute indirect stores are not guaranteed to be aligned
2732 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2733 return false;
2734
2735 // remove any existing load/store records for the store being merged into
2736 // the existing record.
2737 purgeRecords(st, DATA_FILE_COUNT);
2738
2739 st->takeExtraSources(0, extra); // save predicate and indirect address
2740
2741 if (offRc < offSt) {
2742 // save values from @st
2743 for (s = 0; sizeSt; ++s) {
2744 sizeSt -= st->getSrc(s + 1)->reg.size;
2745 src[s] = st->getSrc(s + 1);
2746 }
2747 // set record's values as low sources of @st
2748 for (j = 1; sizeRc; ++j) {
2749 sizeRc -= rec->insn->getSrc(j)->reg.size;
2750 st->setSrc(j, rec->insn->getSrc(j));
2751 }
2752 // set saved values as high sources of @st
2753 for (k = j, j = 0; j < s; ++j)
2754 st->setSrc(k++, src[j]);
2755
2756 updateLdStOffset(st, offRc, func);
2757 } else {
2758 for (j = 1; sizeSt; ++j)
2759 sizeSt -= st->getSrc(j)->reg.size;
2760 for (s = 1; sizeRc; ++j, ++s) {
2761 sizeRc -= rec->insn->getSrc(s)->reg.size;
2762 st->setSrc(j, rec->insn->getSrc(s));
2763 }
2764 rec->offset = offSt;
2765 }
2766 st->putExtraSources(0, extra); // restore pointer and predicate
2767
2768 delete_Instruction(prog, rec->insn);
2769 rec->insn = st;
2770 rec->size = size;
2771 rec->insn->getSrc(0)->reg.size = size;
2772 rec->insn->setType(typeOfSize(size));
2773 return true;
2774 }
2775
2776 void
2777 MemoryOpt::Record::set(const Instruction *ldst)
2778 {
2779 const Symbol *mem = ldst->getSrc(0)->asSym();
2780 fileIndex = mem->reg.fileIndex;
2781 rel[0] = ldst->getIndirect(0, 0);
2782 rel[1] = ldst->getIndirect(0, 1);
2783 offset = mem->reg.data.offset;
2784 base = mem->getBase();
2785 size = typeSizeof(ldst->sType);
2786 }
2787
2788 void
2789 MemoryOpt::Record::link(Record **list)
2790 {
2791 next = *list;
2792 if (next)
2793 next->prev = this;
2794 prev = NULL;
2795 *list = this;
2796 }
2797
2798 void
2799 MemoryOpt::Record::unlink(Record **list)
2800 {
2801 if (next)
2802 next->prev = prev;
2803 if (prev)
2804 prev->next = next;
2805 else
2806 *list = next;
2807 }
2808
2809 MemoryOpt::Record **
2810 MemoryOpt::getList(const Instruction *insn)
2811 {
2812 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2813 return &loads[insn->src(0).getFile()];
2814 return &stores[insn->src(0).getFile()];
2815 }
2816
2817 void
2818 MemoryOpt::addRecord(Instruction *i)
2819 {
2820 Record **list = getList(i);
2821 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2822
2823 it->link(list);
2824 it->set(i);
2825 it->insn = i;
2826 it->locked = false;
2827 }
2828
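// Find a previous access that either covers the location of @insn (so its
// value can be reused directly, isAdj = false) or ends exactly where @insn
// begins (a candidate for combining, isAdj = true).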
2829 MemoryOpt::Record *
2830 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2831 {
2832 const Symbol *sym = insn->getSrc(0)->asSym();
2833 const int size = typeSizeof(insn->sType);
2834 Record *rec = NULL;
2835 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2836
2837 for (; it; it = it->next) {
2838 if (it->locked && insn->op != OP_LOAD && insn->op != OP_VFETCH)
2839 continue;
2840 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2841 it->rel[0] != insn->getIndirect(0, 0) ||
2842 it->fileIndex != sym->reg.fileIndex ||
2843 it->rel[1] != insn->getIndirect(0, 1))
2844 continue;
2845
2846 if (it->offset < sym->reg.data.offset) {
2847 if (it->offset + it->size >= sym->reg.data.offset) {
2848 isAdj = (it->offset + it->size == sym->reg.data.offset);
2849 if (!isAdj)
2850 return it;
2851 if (!(it->offset & 0x7))
2852 rec = it;
2853 }
2854 } else {
2855 isAdj = it->offset != sym->reg.data.offset;
2856 if (size <= it->size && !isAdj)
2857 return it;
2858 else
2859 if (!(sym->reg.data.offset & 0x7))
2860 if (it->offset - size <= sym->reg.data.offset)
2861 rec = it;
2862 }
2863 }
2864 return rec;
2865 }
2866
2867 bool
2868 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2869 {
2870 Instruction *st = rec->insn;
2871 int32_t offSt = rec->offset;
2872 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2873 int d, s;
2874
2875 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2876 offSt += st->getSrc(s)->reg.size;
2877 if (offSt != offLd)
2878 return false;
2879
2880 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2881 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
2882 return false;
2883 if (st->getSrc(s)->reg.file != FILE_GPR)
2884 return false;
2885 ld->def(d).replace(st->src(s), false);
2886 }
2887 ld->bb->remove(ld);
2888 return true;
2889 }
2890
2891 bool
2892 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
2893 {
2894 Instruction *ldR = rec->insn;
2895 int32_t offR = rec->offset;
2896 int32_t offE = ldE->getSrc(0)->reg.data.offset;
2897 int dR, dE;
2898
2899 assert(offR <= offE);
2900 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
2901 offR += ldR->getDef(dR)->reg.size;
2902 if (offR != offE)
2903 return false;
2904
2905 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
2906 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
2907 return false;
2908 ldE->def(dE).replace(ldR->getDef(dR), false);
2909 }
2910
2911 delete_Instruction(prog, ldE);
2912 return true;
2913 }
2914
2915 bool
2916 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
2917 {
2918 const Instruction *const ri = rec->insn;
2919 Value *extra[3];
2920
2921 int32_t offS = st->getSrc(0)->reg.data.offset;
2922 int32_t offR = rec->offset;
2923 int32_t endS = offS + typeSizeof(st->dType);
2924 int32_t endR = offR + typeSizeof(ri->dType);
2925
2926 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
2927
2928 st->takeExtraSources(0, extra);
2929
2930 if (offR < offS) {
2931 Value *vals[10];
2932 int s, n;
2933 int k = 0;
2934 // get non-replaced sources of ri
2935 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
2936 vals[k++] = ri->getSrc(s);
2937 n = s;
2938 // get replaced sources of st
2939 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
2940 vals[k++] = st->getSrc(s);
2941 // skip replaced sources of ri
2942 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
2943 // get non-replaced sources after values covered by st
2944 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
2945 vals[k++] = ri->getSrc(s);
2946 assert((unsigned int)k <= ARRAY_SIZE(vals));
2947 for (s = 0; s < k; ++s)
2948 st->setSrc(s + 1, vals[s]);
2949 st->setSrc(0, ri->getSrc(0));
2950 } else
2951 if (endR > endS) {
2952 int j, s;
2953 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
2954 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
2955 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
2956 st->setSrc(s++, ri->getSrc(j));
2957 }
2958 st->putExtraSources(0, extra);
2959
2960 delete_Instruction(prog, rec->insn);
2961
2962 rec->insn = st;
2963 rec->offset = st->getSrc(0)->reg.data.offset;
2964
2965 st->setType(typeOfSize(rec->size));
2966
2967 return true;
2968 }
2969
2970 bool
2971 MemoryOpt::Record::overlaps(const Instruction *ldst) const
2972 {
2973 Record that;
2974 that.set(ldst);
2975
2976 // This assumes that images/buffers can't overlap. They can.
2977 // TODO: Plumb the restrict logic through, and only skip when it's a
2978 // restrict situation, or there can implicitly be no writes.
2979 if (this->fileIndex != that.fileIndex && this->rel[1] == that.rel[1])
2980 return false;
2981
2982 if (this->rel[0] || that.rel[0])
2983 return this->base == that.base;
2984
2985 return
2986 (this->offset < that.offset + that.size) &&
2987 (this->offset + this->size > that.offset);
2988 }
2989
2990 // Once we find later stores to the same location, we must not eliminate
2991 // the stores that affect the result of @ld, and we may no longer merge
2992 // them with those later stores.
2993 // The stored value can, however, still be used to determine the value
2994 // returned by future loads.
2995 void
2996 MemoryOpt::lockStores(Instruction *const ld)
2997 {
2998 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
2999 if (!r->locked && r->overlaps(ld))
3000 r->locked = true;
3001 }
3002
3003 // Prior loads from the location of @st are no longer valid.
3004 // Stores to the location of @st may no longer be used to derive
3005 // the value at it nor be coalesced into later stores.
3006 void
3007 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
3008 {
3009 if (st)
3010 f = st->src(0).getFile();
3011
3012 for (Record *r = loads[f]; r; r = r->next)
3013 if (!st || r->overlaps(st))
3014 r->unlink(&loads[f]);
3015
3016 for (Record *r = stores[f]; r; r = r->next)
3017 if (!st || r->overlaps(st))
3018 r->unlink(&stores[f]);
3019 }
3020
3021 bool
3022 MemoryOpt::visit(BasicBlock *bb)
3023 {
3024 bool ret = runOpt(bb);
3025 // Run again; one pass won't combine four 32-bit ld/st into a single 128-bit
3026 // ld/st where 96-bit memory operations are forbidden.
3027 if (ret)
3028 ret = runOpt(bb);
3029 return ret;
3030 }
3031
3032 bool
3033 MemoryOpt::runOpt(BasicBlock *bb)
3034 {
3035 Instruction *ldst, *next;
3036 Record *rec;
3037 bool isAdjacent = true;
3038
3039 for (ldst = bb->getEntry(); ldst; ldst = next) {
3040 bool keep = true;
3041 bool isLoad = true;
3042 next = ldst->next;
3043
3044 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
3045 if (ldst->isDead()) {
3046 // might have been produced by earlier optimization
3047 delete_Instruction(prog, ldst);
3048 continue;
3049 }
3050 } else
3051 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
3052 if (typeSizeof(ldst->dType) == 4 &&
3053 ldst->src(1).getFile() == FILE_GPR &&
3054 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
3055 delete_Instruction(prog, ldst);
3056 continue;
3057 }
3058 isLoad = false;
3059 } else {
3060 // TODO: maybe have all fixed ops act as barrier ?
3061 if (ldst->op == OP_CALL ||
3062 ldst->op == OP_BAR ||
3063 ldst->op == OP_MEMBAR) {
3064 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3065 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3066 purgeRecords(NULL, FILE_MEMORY_SHARED);
3067 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3068 } else
3069 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
3070 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
3071 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3072 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3073 purgeRecords(NULL, FILE_MEMORY_SHARED);
3074 } else {
3075 purgeRecords(NULL, ldst->src(0).getFile());
3076 }
3077 } else
3078 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
3079 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3080 }
3081 continue;
3082 }
3083 if (ldst->getPredicate()) // TODO: handle predicated ld/st
3084 continue;
3085 if (ldst->perPatch) // TODO: create separate per-patch lists
3086 continue;
3087
3088 if (isLoad) {
3089 DataFile file = ldst->src(0).getFile();
3090
3091 // if ld l[]/g[] look for previous store to eliminate the reload
3092 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
3093 // TODO: shared memory ?
3094 rec = findRecord(ldst, false, isAdjacent);
3095 if (rec && !isAdjacent)
3096 keep = !replaceLdFromSt(ldst, rec);
3097 }
3098
3099 // or look for ld from the same location and replace this one
3100 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
3101 if (rec) {
3102 if (!isAdjacent)
3103 keep = !replaceLdFromLd(ldst, rec);
3104 else
3105 // or combine a previous load with this one
3106 keep = !combineLd(rec, ldst);
3107 }
3108 if (keep)
3109 lockStores(ldst);
3110 } else {
3111 rec = findRecord(ldst, false, isAdjacent);
3112 if (rec) {
3113 if (!isAdjacent)
3114 keep = !replaceStFromSt(ldst, rec);
3115 else
3116 keep = !combineSt(rec, ldst);
3117 }
3118 if (keep)
3119 purgeRecords(ldst, DATA_FILE_COUNT);
3120 }
3121 if (keep)
3122 addRecord(ldst);
3123 }
3124 reset();
3125
3126 return true;
3127 }
3128
3129 // =============================================================================
3130
3131 // Turn control flow into predicated instructions (after register allocation !).
3132 // TODO:
3133 // Could move this to before register allocation on NVC0 and also handle nested
3134 // constructs.
3135 class FlatteningPass : public Pass
3136 {
3137 private:
3138 virtual bool visit(Function *);
3139 virtual bool visit(BasicBlock *);
3140
3141 bool tryPredicateConditional(BasicBlock *);
3142 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
3143 void tryPropagateBranch(BasicBlock *);
3144 inline bool isConstantCondition(Value *pred);
3145 inline bool mayPredicate(const Instruction *, const Value *pred) const;
3146 inline void removeFlow(Instruction *);
3147
3148 uint8_t gpr_unit;
3149 };
3150
3151 bool
3152 FlatteningPass::isConstantCondition(Value *pred)
3153 {
3154 Instruction *insn = pred->getUniqueInsn();
3155 assert(insn);
3156 if (insn->op != OP_SET || insn->srcExists(2))
3157 return false;
3158
3159 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
3160 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
3161 DataFile file;
3162 if (ld) {
3163 if (ld->op != OP_MOV && ld->op != OP_LOAD)
3164 return false;
3165 if (ld->src(0).isIndirect(0))
3166 return false;
3167 file = ld->src(0).getFile();
3168 } else {
3169 file = insn->src(s).getFile();
3170 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
3171 // in register "units", which can vary between targets.
3172 if (file == FILE_GPR) {
3173 Value *v = insn->getSrc(s);
3174 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
3175 int units = bytes >> gpr_unit;
3176 if (units > prog->maxGPR)
3177 file = FILE_IMMEDIATE;
3178 }
3179 }
3180 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
3181 return false;
3182 }
3183 return true;
3184 }
3185
3186 void
3187 FlatteningPass::removeFlow(Instruction *insn)
3188 {
3189 FlowInstruction *term = insn ? insn->asFlow() : NULL;
3190 if (!term)
3191 return;
3192 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
3193
3194 if (term->op == OP_BRA) {
3195 // TODO: this might get more difficult when we get arbitrary BRAs
3196 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
3197 return;
3198 } else
3199 if (term->op != OP_JOIN)
3200 return;
3201
3202 Value *pred = term->getPredicate();
3203
3204 delete_Instruction(prog, term);
3205
3206 if (pred && pred->refCount() == 0) {
3207 Instruction *pSet = pred->getUniqueInsn();
3208 pred->join->reg.data.id = -1; // deallocate
3209 if (pSet->isDead())
3210 delete_Instruction(prog, pSet);
3211 }
3212 }
3213
3214 void
3215 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3216 {
3217 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3218 if (i->isNop())
3219 continue;
3220 assert(!i->getPredicate());
3221 i->setPredicate(cc, pred);
3222 }
3223 removeFlow(bb->getExit());
3224 }
3225
3226 bool
3227 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3228 {
3229 if (insn->isPseudo())
3230 return true;
3231 // TODO: calls where we don't know which registers are modified
3232
3233 if (!prog->getTarget()->mayPredicate(insn, pred))
3234 return false;
3235 for (int d = 0; insn->defExists(d); ++d)
3236 if (insn->getDef(d)->equals(pred))
3237 return false;
3238 return true;
3239 }
3240
3241 // If we jump to BRA/RET/EXIT, replace the jump with it.
3242 // NOTE: We do not update the CFG anymore here !
3243 //
3244 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3245 // BB:0
3246 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3247 // BB1:
3248 // bra BB:3
3249 // BB2:
3250 // ...
3251 // BB3:
3252 // ...
3253 void
3254 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3255 {
3256 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3257 BasicBlock *bf = i->asFlow()->target.bb;
3258
3259 if (bf->getInsnCount() != 1)
3260 continue;
3261
3262 FlowInstruction *bra = i->asFlow();
3263 FlowInstruction *rep = bf->getExit()->asFlow();
3264
3265 if (!rep || rep->getPredicate())
3266 continue;
3267 if (rep->op != OP_BRA &&
3268 rep->op != OP_JOIN &&
3269 rep->op != OP_EXIT)
3270 continue;
3271
3272 // TODO: If there are multiple branches to @rep, only the first would
3273 // be replaced, so only remove them after this pass is done ?
3274 // Also, need to check all incident blocks for fall-through exits and
3275 // add the branch there.
3276 bra->op = rep->op;
3277 bra->target.bb = rep->target.bb;
3278 if (bf->cfg.incidentCount() == 1)
3279 bf->remove(rep);
3280 }
3281 }
3282
3283 bool
3284 FlatteningPass::visit(Function *fn)
3285 {
3286 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3287
3288 return true;
3289 }
3290
3291 bool
3292 FlatteningPass::visit(BasicBlock *bb)
3293 {
3294 if (tryPredicateConditional(bb))
3295 return true;
3296
3297 // try to attach join to previous instruction
3298 if (prog->getTarget()->hasJoin) {
3299 Instruction *insn = bb->getExit();
3300 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3301 insn = insn->prev;
3302 if (insn && !insn->getPredicate() &&
3303 !insn->asFlow() &&
3304 insn->op != OP_DISCARD &&
3305 insn->op != OP_TEXBAR &&
3306 !isTextureOp(insn->op) && // probably just nve4
3307 !isSurfaceOp(insn->op) && // not confirmed
3308 insn->op != OP_LINTERP && // probably just nve4
3309 insn->op != OP_PINTERP && // probably just nve4
3310 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3311 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3312 !insn->isNop()) {
3313 insn->join = 1;
3314 bb->remove(bb->getExit());
3315 return true;
3316 }
3317 }
3318 }
3319
3320 tryPropagateBranch(bb);
3321
3322 return true;
3323 }
3324
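// If @bb opens a simple conditional with short alternative blocks (at most
// 12 instructions per side, 4 if the condition is immediate/load-based),
// if-convert it: predicate one side with the branch condition and the other
// with its inverse, then remove the branch and join. Sketch (polarity
// follows the exit branch's condition code):
//    @p0 bra ELSE; mov $r0, a; bra END; ELSE: mov $r0, b; END:
// becomes
//    @!p0 mov $r0, a; @p0 mov $r0, b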
3325 bool
3326 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3327 {
3328 BasicBlock *bL = NULL, *bR = NULL;
3329 unsigned int nL = 0, nR = 0, limit = 12;
3330 Instruction *insn;
3331 unsigned int mask;
3332
3333 mask = bb->initiatesSimpleConditional();
3334 if (!mask)
3335 return false;
3336
3337 assert(bb->getExit());
3338 Value *pred = bb->getExit()->getPredicate();
3339 assert(pred);
3340
3341 if (isConstantCondition(pred))
3342 limit = 4;
3343
3344 Graph::EdgeIterator ei = bb->cfg.outgoing();
3345
3346 if (mask & 1) {
3347 bL = BasicBlock::get(ei.getNode());
3348 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3349 if (!mayPredicate(insn, pred))
3350 return false;
3351 if (nL > limit)
3352 return false; // too long, do a real branch
3353 }
3354 ei.next();
3355
3356 if (mask & 2) {
3357 bR = BasicBlock::get(ei.getNode());
3358 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3359 if (!mayPredicate(insn, pred))
3360 return false;
3361 if (nR > limit)
3362 return false; // too long, do a real branch
3363 }
3364
3365 if (bL)
3366 predicateInstructions(bL, pred, bb->getExit()->cc);
3367 if (bR)
3368 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3369
3370 if (bb->joinAt) {
3371 bb->remove(bb->joinAt);
3372 bb->joinAt = NULL;
3373 }
3374 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3375
3376 // remove potential join operations at the end of the conditional
3377 if (prog->getTarget()->joinAnterior) {
3378 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3379 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3380 removeFlow(bb->getEntry());
3381 }
3382
3383 return true;
3384 }
3385
3386 // =============================================================================
3387
3388 // Fold Immediate into MAD; must be done after register allocation due to
3389 // constraint SDST == SSRC2
3390 // TODO:
3391 // Does NVC0+ have other situations where this pass makes sense?
3392 class PostRaLoadPropagation : public Pass
3393 {
3394 private:
3395 virtual bool visit(Instruction *);
3396
3397 void handleMADforNV50(Instruction *);
3398 void handleMADforNVC0(Instruction *);
3399 };
3400
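// After register allocation there is no refCount-driven DCE pass, so this
// helper checks whether an instruction's results are entirely unused.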
3401 static bool
3402 post_ra_dead(Instruction *i)
3403 {
3404 for (int d = 0; i->defExists(d); ++d)
3405 if (i->getDef(d)->refCount())
3406 return false;
3407 return true;
3408 }
3409
3410 // Fold Immediate into MAD; must be done after register allocation due to
3411 // constraint SDST == SSRC2
3412 void
3413 PostRaLoadPropagation::handleMADforNV50(Instruction *i)
3414 {
3415 if (i->def(0).getFile() != FILE_GPR ||
3416 i->src(0).getFile() != FILE_GPR ||
3417 i->src(1).getFile() != FILE_GPR ||
3418 i->src(2).getFile() != FILE_GPR ||
3419 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3420 return;
3421
3422 if (i->getDef(0)->reg.data.id >= 64 ||
3423 i->getSrc(0)->reg.data.id >= 64)
3424 return;
3425
3426 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3427 return;
3428
3429 if (i->getPredicate())
3430 return;
3431
3432 Value *vtmp;
3433 Instruction *def = i->getSrc(1)->getInsn();
3434
3435 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3436 def = def->getSrc(0)->getInsn();
3437 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3438 vtmp = i->getSrc(1);
3439 if (isFloatType(i->sType)) {
3440 i->setSrc(1, def->getSrc(0));
3441 } else {
3442 ImmediateValue val;
3443 // getImmediate() has side-effects on the argument so this *shouldn't*
3444 // be folded into the assert()
3445 MAYBE_UNUSED bool ret = def->src(0).getImmediate(val);
3446 assert(ret);
3447 if (i->getSrc(1)->reg.data.id & 1)
3448 val.reg.data.u32 >>= 16;
3449 val.reg.data.u32 &= 0xffff;
3450 i->setSrc(1, new_ImmediateValue(prog, val.reg.data.u32));
3451 }
3452
3453 /* There's no post-RA dead code elimination, so do it here
3454 * XXX: if we add more code-removing post-RA passes, we might
3455 * want to create a post-RA dead-code elim pass */
3456 if (post_ra_dead(vtmp->getInsn())) {
3457 Value *src = vtmp->getInsn()->getSrc(0);
3458 // Careful -- splits will have already been removed from the
3459 // functions. Don't double-delete.
3460 if (vtmp->getInsn()->bb)
3461 delete_Instruction(prog, vtmp->getInsn());
3462 if (src->getInsn() && post_ra_dead(src->getInsn()))
3463 delete_Instruction(prog, src->getInsn());
3464 }
3465 }
3466 }
3467
3468 void
3469 PostRaLoadPropagation::handleMADforNVC0(Instruction *i)
3470 {
3471 if (i->def(0).getFile() != FILE_GPR ||
3472 i->src(0).getFile() != FILE_GPR ||
3473 i->src(1).getFile() != FILE_GPR ||
3474 i->src(2).getFile() != FILE_GPR ||
3475 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3476 return;
3477
3478 // TODO: gm107 can also do this for S32, maybe other chipsets as well
3479 if (i->dType != TYPE_F32)
3480 return;
3481
3482 if ((i->src(2).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3483 return;
3484
3485 ImmediateValue val;
3486 int s;
3487
3488 if (i->src(0).getImmediate(val))
3489 s = 1;
3490 else if (i->src(1).getImmediate(val))
3491 s = 0;
3492 else
3493 return;
3494
3495 if ((i->src(s).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3496 return;
3497
3498 if (s == 1)
3499 i->swapSources(0, 1);
3500
3501 Instruction *imm = i->getSrc(1)->getInsn();
3502 i->setSrc(1, imm->getSrc(0));
3503 if (post_ra_dead(imm))
3504 delete_Instruction(prog, imm);
3505 }
3506
3507 bool
3508 PostRaLoadPropagation::visit(Instruction *i)
3509 {
3510 switch (i->op) {
3511 case OP_FMA:
3512 case OP_MAD:
3513 if (prog->getTarget()->getChipset() < 0xc0)
3514 handleMADforNV50(i);
3515 else
3516 handleMADforNVC0(i);
3517 break;
3518 default:
3519 break;
3520 }
3521
3522 return true;
3523 }
3524
3525 // =============================================================================
3526
3527 // Common subexpression elimination. Stupid O(n^2) implementation.
3528 class LocalCSE : public Pass
3529 {
3530 private:
3531 virtual bool visit(BasicBlock *);
3532
3533 inline bool tryReplace(Instruction **, Instruction *);
3534
3535 DLList ops[OP_LAST + 1];
3536 };
3537
3538 class GlobalCSE : public Pass
3539 {
3540 private:
3541 virtual bool visit(BasicBlock *);
3542 };
3543
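// Compare everything that determines what two instructions compute, except
// for their actual operands; isResultEqual() below also compares those.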
3544 bool
3545 Instruction::isActionEqual(const Instruction *that) const
3546 {
3547 if (this->op != that->op ||
3548 this->dType != that->dType ||
3549 this->sType != that->sType)
3550 return false;
3551 if (this->cc != that->cc)
3552 return false;
3553
3554 if (this->asTex()) {
3555 if (memcmp(&this->asTex()->tex,
3556 &that->asTex()->tex,
3557 sizeof(this->asTex()->tex)))
3558 return false;
3559 } else
3560 if (this->asCmp()) {
3561 if (this->asCmp()->setCond != that->asCmp()->setCond)
3562 return false;
3563 } else
3564 if (this->asFlow()) {
3565 return false;
3566 } else
3567 if (this->op == OP_PHI && this->bb != that->bb) {
3568 /* TODO: we could probably be a bit smarter here by following the
3569 * control flow, but honestly, it is quite painful to check */
3570 return false;
3571 } else {
3572 if (this->ipa != that->ipa ||
3573 this->lanes != that->lanes ||
3574 this->perPatch != that->perPatch)
3575 return false;
3576 if (this->postFactor != that->postFactor)
3577 return false;
3578 }
3579
3580 if (this->subOp != that->subOp ||
3581 this->saturate != that->saturate ||
3582 this->rnd != that->rnd ||
3583 this->ftz != that->ftz ||
3584 this->dnz != that->dnz ||
3585 this->cache != that->cache ||
3586 this->mask != that->mask)
3587 return false;
3588
3589 return true;
3590 }
3591
3592 bool
3593 Instruction::isResultEqual(const Instruction *that) const
3594 {
3595 unsigned int d, s;
3596
3597 // NOTE: location of discard only affects tex with liveOnly and quadops
3598 if (!this->defExists(0) && this->op != OP_DISCARD)
3599 return false;
3600
3601 if (!isActionEqual(that))
3602 return false;
3603
3604 if (this->predSrc != that->predSrc)
3605 return false;
3606
3607 for (d = 0; this->defExists(d); ++d) {
3608 if (!that->defExists(d) ||
3609 !this->getDef(d)->equals(that->getDef(d), false))
3610 return false;
3611 }
3612 if (that->defExists(d))
3613 return false;
3614
3615 for (s = 0; this->srcExists(s); ++s) {
3616 if (!that->srcExists(s))
3617 return false;
3618 if (this->src(s).mod != that->src(s).mod)
3619 return false;
3620 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3621 return false;
3622 }
3623 if (that->srcExists(s))
3624 return false;
3625
3626 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3627 switch (src(0).getFile()) {
3628 case FILE_MEMORY_CONST:
3629 case FILE_SHADER_INPUT:
3630 return true;
3631 case FILE_SHADER_OUTPUT:
3632 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3633 default:
3634 return false;
3635 }
3636 }
3637
3638 return true;
3639 }
3640
3641 // pull through common expressions from different in-blocks
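// If every source of a phi is computed by an equivalent instruction in its
// respective predecessor, hoist one copy into the join block and delete the
// phi, e.g. phi(add(a, b), add(a, b)) -> a single add(a, b) at the merge
// point.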
3642 bool
3643 GlobalCSE::visit(BasicBlock *bb)
3644 {
3645 Instruction *phi, *next, *ik;
3646 int s;
3647
3648 // TODO: maybe do this with OP_UNION, too
3649
3650 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3651 next = phi->next;
3652 if (phi->getSrc(0)->refCount() > 1)
3653 continue;
3654 ik = phi->getSrc(0)->getInsn();
3655 if (!ik)
3656 continue; // probably a function input
3657 if (ik->defCount(0xff) > 1)
3658 continue; // too painful to check if we can really push this forward
3659 for (s = 1; phi->srcExists(s); ++s) {
3660 if (phi->getSrc(s)->refCount() > 1)
3661 break;
3662 if (!phi->getSrc(s)->getInsn() ||
3663 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3664 break;
3665 }
3666 if (!phi->srcExists(s)) {
3667 assert(ik->op != OP_PHI);
3668 Instruction *entry = bb->getEntry();
3669 ik->bb->remove(ik);
3670 if (!entry || entry->op != OP_JOIN)
3671 bb->insertHead(ik);
3672 else
3673 bb->insertAfter(entry, ik);
3674 ik->setDef(0, phi->getDef(0));
3675 delete_Instruction(prog, phi);
3676 }
3677 }
3678
3679 return true;
3680 }
3681
3682 bool
3683 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3684 {
3685 Instruction *old = *ptr;
3686
3687 // TODO: maybe relax this later (causes trouble with OP_UNION)
3688 if (i->isPredicated())
3689 return false;
3690
3691 if (!old->isResultEqual(i))
3692 return false;
3693
3694 for (int d = 0; old->defExists(d); ++d)
3695 old->def(d).replace(i->getDef(d), false);
3696 delete_Instruction(prog, old);
3697 *ptr = NULL;
3698 return true;
3699 }
3700
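// For each instruction, search backwards for an equivalent one to reuse:
// either among the other uses of its least-referenced LValue source, or, for
// instructions without such sources, in the per-opcode list built so far.
// Repeat until a fixpoint, since each replacement can enable further ones.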
3701 bool
3702 LocalCSE::visit(BasicBlock *bb)
3703 {
3704 unsigned int replaced;
3705
3706 do {
3707 Instruction *ir, *next;
3708
3709 replaced = 0;
3710
3711 // will need to know the order of instructions
3712 int serial = 0;
3713 for (ir = bb->getFirst(); ir; ir = ir->next)
3714 ir->serial = serial++;
3715
3716 for (ir = bb->getFirst(); ir; ir = next) {
3717 int s;
3718 Value *src = NULL;
3719
3720 next = ir->next;
3721
3722 if (ir->fixed) {
3723 ops[ir->op].insert(ir);
3724 continue;
3725 }
3726
3727 for (s = 0; ir->srcExists(s); ++s)
3728 if (ir->getSrc(s)->asLValue())
3729 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3730 src = ir->getSrc(s);
3731
3732 if (src) {
3733 for (Value::UseIterator it = src->uses.begin();
3734 it != src->uses.end(); ++it) {
3735 Instruction *ik = (*it)->getInsn();
3736 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3737 if (tryReplace(&ir, ik))
3738 break;
3739 }
3740 } else {
3741 DLLIST_FOR_EACH(&ops[ir->op], iter)
3742 {
3743 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3744 if (tryReplace(&ir, ik))
3745 break;
3746 }
3747 }
3748
3749 if (ir)
3750 ops[ir->op].insert(ir);
3751 else
3752 ++replaced;
3753 }
3754 for (unsigned int i = 0; i <= OP_LAST; ++i)
3755 ops[i].clear();
3756
3757 } while (replaced);
3758
3759 return true;
3760 }
3761
3762 // =============================================================================
3763
3764 // Remove computations of unused values.
3765 class DeadCodeElim : public Pass
3766 {
3767 public:
3768 bool buryAll(Program *);
3769
3770 private:
3771 virtual bool visit(BasicBlock *);
3772
3773 void checkSplitLoad(Instruction *ld); // for partially dead loads
3774
3775 unsigned int deadCount;
3776 };
3777
3778 bool
3779 DeadCodeElim::buryAll(Program *prog)
3780 {
3781 do {
3782 deadCount = 0;
3783 if (!this->run(prog, false, false))
3784 return false;
3785 } while (deadCount);
3786
3787 return true;
3788 }
3789
3790 bool
3791 DeadCodeElim::visit(BasicBlock *bb)
3792 {
3793 Instruction *prev;
3794
3795 for (Instruction *i = bb->getExit(); i; i = prev) {
3796 prev = i->prev;
3797 if (i->isDead()) {
3798 ++deadCount;
3799 delete_Instruction(prog, i);
3800 } else
3801 if (i->defExists(1) &&
3802 i->subOp == 0 &&
3803 (i->op == OP_VFETCH || i->op == OP_LOAD)) {
3804 checkSplitLoad(i);
3805 } else
3806 if (i->defExists(0) && !i->getDef(0)->refCount()) {
3807 if (i->op == OP_ATOM ||
3808 i->op == OP_SUREDP ||
3809 i->op == OP_SUREDB) {
3810 i->setDef(0, NULL);
3811 if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
3812 i->cache = CACHE_CV;
3813 i->op = OP_STORE;
3814 i->subOp = 0;
3815 }
3816 } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
3817 i->setDef(0, i->getDef(1));
3818 i->setDef(1, NULL);
3819 }
3820 }
3821 }
3822 return true;
3823 }
3824
3825 // Each load can go into up to 4 destinations, any of which might potentially
3826 // be dead (i.e. a hole). These can always be split into 2 loads, independent
3827 // of where the holes are. We find the first contiguous region, put it into
3828 // the first load, and then put the second contiguous region into the second
3829 // load. There can be at most 2 contiguous regions.
3830 //
3831 // Note that there are some restrictions, for example it's not possible to do
3832 // a 64-bit load that's not 64-bit aligned, so such a load has to be split
3833 // up. Also hardware doesn't support 96-bit loads, so those also have to be
3834 // split into a 64-bit and 32-bit load.
3835 void
3836 DeadCodeElim::checkSplitLoad(Instruction *ld1)
3837 {
3838 Instruction *ld2 = NULL; // can get at most 2 loads
3839 Value *def1[4];
3840 Value *def2[4];
3841 int32_t addr1, addr2;
3842 int32_t size1, size2;
3843 int d, n1, n2;
3844 uint32_t mask = 0xffffffff;
3845
3846 for (d = 0; ld1->defExists(d); ++d)
3847 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
3848 mask &= ~(1 << d);
3849 if (mask == 0xffffffff)
3850 return;
3851
3852 addr1 = ld1->getSrc(0)->reg.data.offset;
3853 n1 = n2 = 0;
3854 size1 = size2 = 0;
3855
3856 // Compute address/width for first load
3857 for (d = 0; ld1->defExists(d); ++d) {
3858 if (mask & (1 << d)) {
3859 if (size1 && (addr1 & 0x7))
3860 break;
3861 def1[n1] = ld1->getDef(d);
3862 size1 += def1[n1++]->reg.size;
3863 } else
3864 if (!n1) {
3865 addr1 += ld1->getDef(d)->reg.size;
3866 } else {
3867 break;
3868 }
3869 }
3870
3871 // Scale back the size of the first load until it can be loaded. This
3872 // typically happens for TYPE_B96 loads.
3873 while (n1 &&
3874 !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
3875 typeOfSize(size1))) {
3876 size1 -= def1[--n1]->reg.size;
3877 d--;
3878 }
3879
3880 // Compute address/width for second load
3881 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
3882 if (mask & (1 << d)) {
3883 assert(!size2 || !(addr2 & 0x7));
3884 def2[n2] = ld1->getDef(d);
3885 size2 += def2[n2++]->reg.size;
3886 } else if (!n2) {
3887 assert(!n2);
3888 addr2 += ld1->getDef(d)->reg.size;
3889 } else {
3890 break;
3891 }
3892 }
3893
3894 // Make sure that we've processed all the values
3895 for (; ld1->defExists(d); ++d)
3896 assert(!(mask & (1 << d)));
3897
3898 updateLdStOffset(ld1, addr1, func);
3899 ld1->setType(typeOfSize(size1));
3900 for (d = 0; d < 4; ++d)
3901 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
3902
3903 if (!n2)
3904 return;
3905
3906 ld2 = cloneShallow(func, ld1);
3907 updateLdStOffset(ld2, addr2, func);
3908 ld2->setType(typeOfSize(size2));
3909 for (d = 0; d < 4; ++d)
3910 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
3911
3912 ld1->bb->insertAfter(ld1, ld2);
3913 }
3914
3915 // =============================================================================
3916
3917 #define RUN_PASS(l, n, f) \
3918 if (level >= (l)) { \
3919 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
3920 INFO("PEEPHOLE: %s\n", #n); \
3921 n pass; \
3922 if (!pass.f(this)) \
3923 return false; \
3924 }
3925
3926 bool
3927 Program::optimizeSSA(int level)
3928 {
3929 RUN_PASS(1, DeadCodeElim, buryAll);
3930 RUN_PASS(1, CopyPropagation, run);
3931 RUN_PASS(1, MergeSplits, run);
3932 RUN_PASS(2, GlobalCSE, run);
3933 RUN_PASS(1, LocalCSE, run);
3934 RUN_PASS(2, AlgebraicOpt, run);
3935 RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
3936 RUN_PASS(1, ConstantFolding, foldAll);
3937 RUN_PASS(1, Split64BitOpPreRA, run);
3938 RUN_PASS(2, LateAlgebraicOpt, run);
3939 RUN_PASS(1, LoadPropagation, run);
3940 RUN_PASS(1, IndirectPropagation, run);
3941 RUN_PASS(2, MemoryOpt, run);
3942 RUN_PASS(2, LocalCSE, run);
3943 RUN_PASS(0, DeadCodeElim, buryAll);
3944
3945 return true;
3946 }
3947
3948 bool
3949 Program::optimizePostRA(int level)
3950 {
3951 RUN_PASS(2, FlatteningPass, run);
3952 RUN_PASS(2, PostRaLoadPropagation, run);
3953
3954 return true;
3955 }
3956
3957 }