nv50/ir: fold fma/mad when all 3 args are immediates
src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "codegen/nv50_ir.h"
24 #include "codegen/nv50_ir_target.h"
25 #include "codegen/nv50_ir_build_util.h"
26
27 extern "C" {
28 #include "util/u_math.h"
29 }
30
31 namespace nv50_ir {
32
33 bool
34 Instruction::isNop() const
35 {
36 if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
37 return true;
38 if (terminator || join) // XXX: should terminator imply flow ?
39 return false;
40 if (op == OP_ATOM)
41 return false;
42 if (!fixed && op == OP_NOP)
43 return true;
44
45 if (defExists(0) && def(0).rep()->reg.data.id < 0) {
46 for (int d = 1; defExists(d); ++d)
47 if (def(d).rep()->reg.data.id >= 0)
48 WARN("part of vector result is unused !\n");
49 return true;
50 }
51
52 if (op == OP_MOV || op == OP_UNION) {
53 if (!getDef(0)->equals(getSrc(0)))
54 return false;
55 if (op == OP_UNION)
56 if (!def(0).rep()->equals(getSrc(1)))
57 return false;
58 return true;
59 }
60
61 return false;
62 }
63
64 bool Instruction::isDead() const
65 {
66 if (op == OP_STORE ||
67 op == OP_EXPORT ||
68 op == OP_ATOM ||
69 op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
70 op == OP_WRSV)
71 return false;
72
73 for (int d = 0; defExists(d); ++d)
74 if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
75 return false;
76
77 if (terminator || asFlow())
78 return false;
79 if (fixed)
80 return false;
81
82 return true;
83 }
84
85 // =============================================================================
86
87 class CopyPropagation : public Pass
88 {
89 private:
90 virtual bool visit(BasicBlock *);
91 };
92
93 // Propagate all MOVs forward to make subsequent optimization easier, except if
94 // the sources stem from a phi, in which case we don't want to mess up potential
95 // swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
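// A minimal before/after sketch (hypothetical IR, register names invented):
//   mov $r1, $r0            eligible: same file, no predicate, src is an LValue
//   add $r2, $r1, $r3   ->  add $r2, $r0, $r3   (uses of $r1 now read $r0)
// If $r0 were defined by a phi, the MOV would be left in place.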
96 bool
97 CopyPropagation::visit(BasicBlock *bb)
98 {
99 Instruction *mov, *si, *next;
100
101 for (mov = bb->getEntry(); mov; mov = next) {
102 next = mov->next;
103 if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
104 continue;
105 if (mov->getPredicate())
106 continue;
107 if (mov->def(0).getFile() != mov->src(0).getFile())
108 continue;
109 si = mov->getSrc(0)->getInsn();
110 if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
111 // propagate
112 mov->def(0).replace(mov->getSrc(0), false);
113 delete_Instruction(prog, mov);
114 }
115 }
116 return true;
117 }
118
119 // =============================================================================
120
121 class MergeSplits : public Pass
122 {
123 private:
124 virtual bool visit(BasicBlock *);
125 };
126
127 // For SPLIT / MERGE pairs that operate on the same registers, replace the
128 // post-merge def with the SPLIT's source.
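// Illustrative sketch (hypothetical 64-bit value %v):
//   split %lo, %hi = %v;  merge %m = %lo, %hi  ->  uses of %m read %v directly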
129 bool
130 MergeSplits::visit(BasicBlock *bb)
131 {
132 Instruction *i, *next, *si;
133
134 for (i = bb->getEntry(); i; i = next) {
135 next = i->next;
136 if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
137 continue;
138 si = i->getSrc(0)->getInsn();
139 if (!si || si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
140 continue;
141 i->def(0).replace(si->getSrc(0), false);
142 delete_Instruction(prog, i);
143 }
144
145 return true;
146 }
147
148 // =============================================================================
149
150 class LoadPropagation : public Pass
151 {
152 private:
153 virtual bool visit(BasicBlock *);
154
155 void checkSwapSrc01(Instruction *);
156
157 bool isCSpaceLoad(Instruction *);
158 bool isImmdLoad(Instruction *);
159 bool isAttribOrSharedLoad(Instruction *);
160 };
161
162 bool
163 LoadPropagation::isCSpaceLoad(Instruction *ld)
164 {
165 return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
166 }
167
168 bool
169 LoadPropagation::isImmdLoad(Instruction *ld)
170 {
171 if (!ld || (ld->op != OP_MOV) ||
172 ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
173 return false;
174 return ld->src(0).getFile() == FILE_IMMEDIATE;
175 }
176
177 bool
178 LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
179 {
180 return ld &&
181 (ld->op == OP_VFETCH ||
182 (ld->op == OP_LOAD &&
183 (ld->src(0).getFile() == FILE_SHADER_INPUT ||
184 ld->src(0).getFile() == FILE_MEMORY_SHARED)));
185 }
186
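// Swap sources where it helps later propagation: constbuf and immediate
// loads are moved towards src(1), attribute/shared-memory loads towards
// src(0), with the SET/SLCT condition code fixed up accordingly.
// Hypothetical example:
//   set lt $r0, c0[0x10], $r1  ->  set gt $r0, $r1, c0[0x10]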
187 void
188 LoadPropagation::checkSwapSrc01(Instruction *insn)
189 {
190 if (!prog->getTarget()->getOpInfo(insn).commutative)
191 if (insn->op != OP_SET && insn->op != OP_SLCT)
192 return;
193 if (insn->src(1).getFile() != FILE_GPR)
194 return;
195
196 Instruction *i0 = insn->getSrc(0)->getInsn();
197 Instruction *i1 = insn->getSrc(1)->getInsn();
198
199 if (isCSpaceLoad(i0)) {
200 if (!isCSpaceLoad(i1))
201 insn->swapSources(0, 1);
202 else
203 return;
204 } else
205 if (isImmdLoad(i0)) {
206 if (!isCSpaceLoad(i1) && !isImmdLoad(i1))
207 insn->swapSources(0, 1);
208 else
209 return;
210 } else
211 if (isAttribOrSharedLoad(i1)) {
212 if (!isAttribOrSharedLoad(i0))
213 insn->swapSources(0, 1);
214 else
215 return;
216 } else {
217 return;
218 }
219
220 if (insn->op == OP_SET || insn->op == OP_SET_AND ||
221 insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
222 insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
223 else
224 if (insn->op == OP_SLCT)
225 insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
226 }
227
228 bool
229 LoadPropagation::visit(BasicBlock *bb)
230 {
231 const Target *targ = prog->getTarget();
232 Instruction *next;
233
234 for (Instruction *i = bb->getEntry(); i; i = next) {
235 next = i->next;
236
237 if (i->op == OP_CALL) // calls have args as sources, they must be in regs
238 continue;
239
240 if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
241 continue;
242
243 if (i->srcExists(1))
244 checkSwapSrc01(i);
245
246 for (int s = 0; i->srcExists(s); ++s) {
247 Instruction *ld = i->getSrc(s)->getInsn();
248
249 if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
250 continue;
251 if (!targ->insnCanLoad(i, s, ld))
252 continue;
253
254 // propagate !
255 i->setSrc(s, ld->getSrc(0));
256 if (ld->src(0).isIndirect(0))
257 i->setIndirect(s, 0, ld->getIndirect(0, 0));
258
259 if (ld->getDef(0)->refCount() == 0)
260 delete_Instruction(prog, ld);
261 }
262 }
263 return true;
264 }
265
266 // =============================================================================
267
268 // Evaluate constant expressions.
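// e.g. (illustrative): add u32 $r0, (imm 2), (imm 3)  ->  mov $r0, (imm 5)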
269 class ConstantFolding : public Pass
270 {
271 public:
272 bool foldAll(Program *);
273
274 private:
275 virtual bool visit(BasicBlock *);
276
277 void expr(Instruction *, ImmediateValue&, ImmediateValue&);
278 void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
279 void opnd(Instruction *, ImmediateValue&, int s);
280
281 void unary(Instruction *, const ImmediateValue&);
282
283 void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);
284
285 CmpInstruction *findOriginForTestWithZero(Value *);
286
287 unsigned int foldCount;
288
289 BuildUtil bld;
290 };
291
292 // TODO: remember generated immediates and only revisit these
293 bool
294 ConstantFolding::foldAll(Program *prog)
295 {
296 unsigned int iterCount = 0;
297 do {
298 foldCount = 0;
299 if (!run(prog))
300 return false;
301 } while (foldCount && ++iterCount < 2);
302 return true;
303 }
304
305 bool
306 ConstantFolding::visit(BasicBlock *bb)
307 {
308 Instruction *i, *next;
309
310 for (i = bb->getEntry(); i; i = next) {
311 next = i->next;
312 if (i->op == OP_MOV || i->op == OP_CALL)
313 continue;
314
315 ImmediateValue src0, src1, src2;
316
317 if (i->srcExists(2) &&
318 i->src(0).getImmediate(src0) &&
319 i->src(1).getImmediate(src1) &&
320 i->src(2).getImmediate(src2))
321 expr(i, src0, src1, src2);
322 else
323 if (i->srcExists(1) &&
324 i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
325 expr(i, src0, src1);
326 else
327 if (i->srcExists(0) && i->src(0).getImmediate(src0))
328 opnd(i, src0, 0);
329 else
330 if (i->srcExists(1) && i->src(1).getImmediate(src1))
331 opnd(i, src1, 1);
332 }
333 return true;
334 }
335
336 CmpInstruction *
337 ConstantFolding::findOriginForTestWithZero(Value *value)
338 {
339 if (!value)
340 return NULL;
341 Instruction *insn = value->getInsn();
342
343 if (insn && insn->asCmp() && insn->op != OP_SLCT)
344 return insn->asCmp();
345
346 /* Sometimes MOVs will sneak in as a result of other folding. This gets
347 * cleaned up later.
348 */
349 if (insn && insn->op == OP_MOV)
350 return findOriginForTestWithZero(insn->getSrc(0));
351
352 /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
353 if (insn && insn->op == OP_AND) {
354 int s = 0;
355 ImmediateValue imm;
356 if (!insn->src(s).getImmediate(imm)) {
357 s = 1;
358 if (!insn->src(s).getImmediate(imm))
359 return NULL;
360 }
361 if (imm.reg.data.f32 != 1.0f)
362 return NULL;
363 /* TODO: Come up with a way to handle the condition being inverted */
364 if (insn->src(!s).mod != Modifier(0))
365 return NULL;
366 return findOriginForTestWithZero(insn->getSrc(!s));
367 }
368
369 return NULL;
370 }
371
372 void
373 Modifier::applyTo(ImmediateValue& imm) const
374 {
375 if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
376 return;
377 switch (imm.reg.type) {
378 case TYPE_F32:
379 if (bits & NV50_IR_MOD_ABS)
380 imm.reg.data.f32 = fabsf(imm.reg.data.f32);
381 if (bits & NV50_IR_MOD_NEG)
382 imm.reg.data.f32 = -imm.reg.data.f32;
383 if (bits & NV50_IR_MOD_SAT) {
384 if (imm.reg.data.f32 < 0.0f)
385 imm.reg.data.f32 = 0.0f;
386 else
387 if (imm.reg.data.f32 > 1.0f)
388 imm.reg.data.f32 = 1.0f;
389 }
390 assert(!(bits & NV50_IR_MOD_NOT));
391 break;
392
393 case TYPE_S8: // NOTE: will be extended
394 case TYPE_S16:
395 case TYPE_S32:
396 case TYPE_U8: // NOTE: treated as signed
397 case TYPE_U16:
398 case TYPE_U32:
399 if (bits & NV50_IR_MOD_ABS)
400 imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
401 imm.reg.data.s32 : -imm.reg.data.s32;
402 if (bits & NV50_IR_MOD_NEG)
403 imm.reg.data.s32 = -imm.reg.data.s32;
404 if (bits & NV50_IR_MOD_NOT)
405 imm.reg.data.s32 = ~imm.reg.data.s32;
406 break;
407
408 case TYPE_F64:
409 if (bits & NV50_IR_MOD_ABS)
410 imm.reg.data.f64 = fabs(imm.reg.data.f64);
411 if (bits & NV50_IR_MOD_NEG)
412 imm.reg.data.f64 = -imm.reg.data.f64;
413 if (bits & NV50_IR_MOD_SAT) {
414 if (imm.reg.data.f64 < 0.0)
415 imm.reg.data.f64 = 0.0;
416 else
417 if (imm.reg.data.f64 > 1.0)
418 imm.reg.data.f64 = 1.0;
419 }
420 assert(!(bits & NV50_IR_MOD_NOT));
421 break;
422
423 default:
424 assert(!"invalid/unhandled type");
425 imm.reg.data.u64 = 0;
426 break;
427 }
428 }
429
430 operation
431 Modifier::getOp() const
432 {
433 switch (bits) {
434 case NV50_IR_MOD_ABS: return OP_ABS;
435 case NV50_IR_MOD_NEG: return OP_NEG;
436 case NV50_IR_MOD_SAT: return OP_SAT;
437 case NV50_IR_MOD_NOT: return OP_NOT;
438 case 0:
439 return OP_MOV;
440 default:
441 return OP_CVT;
442 }
443 }
444
445 void
446 ConstantFolding::expr(Instruction *i,
447 ImmediateValue &imm0, ImmediateValue &imm1)
448 {
449 struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
450 struct Storage res;
451 DataType type = i->dType;
452
453 memset(&res.data, 0, sizeof(res.data));
454
455 switch (i->op) {
456 case OP_MAD:
457 case OP_FMA:
458 case OP_MUL:
459 if (i->dnz && i->dType == TYPE_F32) {
460 if (!isfinite(a->data.f32))
461 a->data.f32 = 0.0f;
462 if (!isfinite(b->data.f32))
463 b->data.f32 = 0.0f;
464 }
465 switch (i->dType) {
466 case TYPE_F32:
467 res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
468 break;
469 case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
470 case TYPE_S32:
471 if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
472 res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
473 break;
474 }
475 /* fallthrough */
476 case TYPE_U32:
477 if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
478 res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
479 break;
480 }
481 res.data.u32 = a->data.u32 * b->data.u32; break;
482 default:
483 return;
484 }
485 break;
486 case OP_DIV:
487 if (b->data.u32 == 0)
488 break;
489 switch (i->dType) {
490 case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
491 case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
492 case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
493 case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
494 default:
495 return;
496 }
497 break;
498 case OP_ADD:
499 switch (i->dType) {
500 case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
501 case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
502 case TYPE_S32:
503 case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
504 default:
505 return;
506 }
507 break;
508 case OP_POW:
509 switch (i->dType) {
510 case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
511 case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
512 default:
513 return;
514 }
515 break;
516 case OP_MAX:
517 switch (i->dType) {
518 case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
519 case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
520 case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
521 case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
522 default:
523 return;
524 }
525 break;
526 case OP_MIN:
527 switch (i->dType) {
528 case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
529 case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
530 case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
531 case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
532 default:
533 return;
534 }
535 break;
536 case OP_AND:
537 res.data.u64 = a->data.u64 & b->data.u64;
538 break;
539 case OP_OR:
540 res.data.u64 = a->data.u64 | b->data.u64;
541 break;
542 case OP_XOR:
543 res.data.u64 = a->data.u64 ^ b->data.u64;
544 break;
545 case OP_SHL:
546 res.data.u32 = a->data.u32 << b->data.u32;
547 break;
548 case OP_SHR:
549 switch (i->dType) {
550 case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
551 case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
552 default:
553 return;
554 }
555 break;
556 case OP_SLCT:
557 if (a->data.u32 != b->data.u32)
558 return;
559 res.data.u32 = a->data.u32;
560 break;
561 case OP_EXTBF: {
562 int offset = b->data.u32 & 0xff;
563 int width = (b->data.u32 >> 8) & 0xff;
564 int rshift = offset;
565 int lshift = 0;
566 if (width == 0) {
567 res.data.u32 = 0;
568 break;
569 }
570 if (width + offset < 32) {
571 rshift = 32 - width;
572 lshift = 32 - width - offset;
573 }
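// Worked example (illustrative): offset = 8, width = 8 gives lshift = 16 and
// rshift = 24, so (x << 16) >> 24 isolates bits 8..15 (sign-extended for S32).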
574 if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
575 res.data.u32 = util_bitreverse(a->data.u32);
576 else
577 res.data.u32 = a->data.u32;
578 switch (i->dType) {
579 case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
580 case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
581 default:
582 return;
583 }
584 break;
585 }
586 case OP_POPCNT:
587 res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
588 break;
589 case OP_PFETCH:
590 // The two arguments to pfetch are logically added together. Normally
591 // the second argument will not be constant, but that can happen.
592 res.data.u32 = a->data.u32 + b->data.u32;
593 type = TYPE_U32;
594 break;
595 case OP_MERGE:
596 switch (i->dType) {
597 case TYPE_U64:
598 case TYPE_S64:
599 case TYPE_F64:
600 res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
601 break;
602 default:
603 return;
604 }
605 break;
606 default:
607 return;
608 }
609 ++foldCount;
610
611 i->src(0).mod = Modifier(0);
612 i->src(1).mod = Modifier(0);
613 i->postFactor = 0;
614
615 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
616 i->setSrc(1, NULL);
617
618 i->getSrc(0)->reg.data = res.data;
619 i->getSrc(0)->reg.type = type;
620 i->getSrc(0)->reg.size = typeSizeof(type);
621
622 switch (i->op) {
623 case OP_MAD:
624 case OP_FMA: {
625 i->op = OP_ADD;
626
627 /* Move the immediate to the second arg, otherwise the ADD operation
628 * won't be emittable
629 */
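// Sketch of the resulting fold (values illustrative):
//   mad $r0, (imm 2), (imm 3), $r1  ->  add $r0, $r1, (imm 6)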
630 i->setSrc(1, i->getSrc(0));
631 i->setSrc(0, i->getSrc(2));
632 i->src(0).mod = i->src(2).mod;
633 i->setSrc(2, NULL);
634
635 ImmediateValue src0;
636 if (i->src(0).getImmediate(src0))
637 expr(i, src0, *i->getSrc(1)->asImm());
638 if (i->saturate && !prog->getTarget()->isSatSupported(i)) {
639 bld.setPosition(i, false);
640 i->setSrc(1, bld.loadImm(NULL, res.data.u32));
641 }
642 break;
643 }
644 case OP_PFETCH:
645 // Leave PFETCH alone... we just folded its 2 args into 1.
646 break;
647 default:
648 i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
649 break;
650 }
651 i->subOp = 0;
652 }
653
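// Fold an instruction whose three sources are all immediates (INSBF and
// MAD/FMA) into a single MOV of the computed constant.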
654 void
655 ConstantFolding::expr(Instruction *i,
656 ImmediateValue &imm0,
657 ImmediateValue &imm1,
658 ImmediateValue &imm2)
659 {
660 struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
661 struct Storage res;
662
663 memset(&res.data, 0, sizeof(res.data));
664
665 switch (i->op) {
666 case OP_INSBF: {
667 int offset = b->data.u32 & 0xff;
668 int width = (b->data.u32 >> 8) & 0xff;
669 unsigned bitmask = ((1 << width) - 1) << offset;
670 res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
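// e.g. (illustrative): offset = 8, width = 8 -> bitmask = 0x0000ff00, i.e.
// the low byte of a is inserted into byte 1 of c.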
671 break;
672 }
673 case OP_MAD:
674 case OP_FMA: {
675 switch (i->dType) {
676 case TYPE_F32:
677 res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
678 c->data.f32;
679 break;
680 case TYPE_F64:
681 res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
682 break;
683 case TYPE_S32:
684 if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
685 res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
686 break;
687 }
688 /* fallthrough */
689 case TYPE_U32:
690 if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
691 res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
692 break;
693 }
694 res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
695 break;
696 default:
697 return;
698 }
699 break;
700 }
701 default:
702 return;
703 }
704
705 ++foldCount;
706 i->src(0).mod = Modifier(0);
707 i->src(1).mod = Modifier(0);
708 i->src(2).mod = Modifier(0);
709
710 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
711 i->setSrc(1, NULL);
712 i->setSrc(2, NULL);
713
714 i->getSrc(0)->reg.data = res.data;
715 i->getSrc(0)->reg.type = i->dType;
716 i->getSrc(0)->reg.size = typeSizeof(i->dType);
717
718 i->op = OP_MOV;
719 }
720
721 void
722 ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
723 {
724 Storage res;
725
726 if (i->dType != TYPE_F32)
727 return;
728 switch (i->op) {
729 case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
730 case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
731 case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
732 case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
733 case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
734 case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
735 case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
736 case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
737 case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
738 case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
739 case OP_PRESIN:
740 case OP_PREEX2:
741 // these should be handled in subsequent OP_SIN/COS/EX2
742 res.data.f32 = imm.reg.data.f32;
743 break;
744 default:
745 return;
746 }
747 i->op = OP_MOV;
748 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
749 i->src(0).mod = Modifier(0);
750 }
751
752 void
753 ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
754 const int s, ImmediateValue& imm2)
755 {
756 const int t = s ? 0 : 1;
757 Instruction *insn;
758 Instruction *mul1 = NULL; // mul1 before mul2
759 int e = 0;
760 float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
761 ImmediateValue imm1;
762
763 assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);
764
765 if (mul2->getSrc(t)->refCount() == 1) {
766 insn = mul2->getSrc(t)->getInsn();
767 if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
768 mul1 = insn;
769 if (mul1 && !mul1->saturate) {
770 int s1;
771
772 if (mul1->src(s1 = 0).getImmediate(imm1) ||
773 mul1->src(s1 = 1).getImmediate(imm1)) {
774 bld.setPosition(mul1, false);
775 // a = mul r, imm1
776 // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
777 mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
778 mul1->src(s1).mod = Modifier(0);
779 mul2->def(0).replace(mul1->getDef(0), false);
780 mul1->saturate = mul2->saturate;
781 } else
782 if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
783 // c = mul a, b
784 // d = mul c, imm -> d = mul_x_imm a, b
785 mul1->postFactor = e;
786 mul2->def(0).replace(mul1->getDef(0), false);
787 if (f < 0)
788 mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
789 mul1->saturate = mul2->saturate;
790 }
791 return;
792 }
793 }
794 if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
795 // b = mul a, imm
796 // d = mul b, c -> d = mul_x_imm a, c
797 int s2, t2;
798 insn = (*mul2->getDef(0)->uses.begin())->getInsn();
799 if (!insn)
800 return;
801 mul1 = mul2;
802 mul2 = NULL;
803 s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
804 t2 = s2 ? 0 : 1;
805 if (insn->op == OP_MUL && insn->dType == TYPE_F32)
806 if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
807 mul2 = insn;
808 if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
809 mul2->postFactor = e;
810 mul2->setSrc(s2, mul1->src(t));
811 if (f < 0)
812 mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
813 }
814 }
815 }
816
817 void
818 ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
819 {
820 const int t = !s;
821 const operation op = i->op;
822 Instruction *newi = i;
823
824 switch (i->op) {
825 case OP_MUL:
826 if (i->dType == TYPE_F32)
827 tryCollapseChainedMULs(i, s, imm0);
828
829 if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
830 assert(!isFloatType(i->sType));
831 if (imm0.isInteger(1) && i->dType == TYPE_S32) {
832 bld.setPosition(i, false);
833 // Need to set to the sign value, which is a compare.
834 newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
835 TYPE_S32, i->getSrc(t), bld.mkImm(0));
836 delete_Instruction(prog, i);
837 } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
838 // The high bits can't be set in this case (either mul by 0 or
839 // unsigned by 1)
840 i->op = OP_MOV;
841 i->subOp = 0;
842 i->setSrc(0, new_ImmediateValue(prog, 0u));
843 i->src(0).mod = Modifier(0);
844 i->setSrc(1, NULL);
845 } else if (!imm0.isNegative() && imm0.isPow2()) {
846 // Translate into a shift
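// (the high 32 bits of x * 2^k are simply x >> (32 - k))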
847 imm0.applyLog2();
848 i->op = OP_SHR;
849 i->subOp = 0;
850 imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
851 i->setSrc(0, i->getSrc(t));
852 i->src(0).mod = i->src(t).mod;
853 i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
854 i->src(1).mod = 0;
855 }
856 } else
857 if (imm0.isInteger(0)) {
858 i->op = OP_MOV;
859 i->setSrc(0, new_ImmediateValue(prog, 0u));
860 i->src(0).mod = Modifier(0);
861 i->postFactor = 0;
862 i->setSrc(1, NULL);
863 } else
864 if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
865 if (imm0.isNegative())
866 i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
867 i->op = i->src(t).mod.getOp();
868 if (s == 0) {
869 i->setSrc(0, i->getSrc(1));
870 i->src(0).mod = i->src(1).mod;
871 i->src(1).mod = 0;
872 }
873 if (i->op != OP_CVT)
874 i->src(0).mod = 0;
875 i->setSrc(1, NULL);
876 } else
877 if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
878 if (imm0.isNegative())
879 i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
880 i->op = OP_ADD;
881 i->setSrc(s, i->getSrc(t));
882 i->src(s).mod = i->src(t).mod;
883 } else
884 if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
885 i->op = OP_SHL;
886 imm0.applyLog2();
887 i->setSrc(0, i->getSrc(t));
888 i->src(0).mod = i->src(t).mod;
889 i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
890 i->src(1).mod = 0;
891 } else
892 if (i->postFactor && i->sType == TYPE_F32) {
893 /* Can't emit a postfactor with an immediate, have to fold it in */
894 i->setSrc(s, new_ImmediateValue(
895 prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
896 i->postFactor = 0;
897 }
898 break;
899 case OP_MAD:
900 if (imm0.isInteger(0)) {
901 i->setSrc(0, i->getSrc(2));
902 i->src(0).mod = i->src(2).mod;
903 i->setSrc(1, NULL);
904 i->setSrc(2, NULL);
905 i->op = i->src(0).mod.getOp();
906 if (i->op != OP_CVT)
907 i->src(0).mod = 0;
908 } else
909 if (imm0.isInteger(1) || imm0.isInteger(-1)) {
910 if (imm0.isNegative())
911 i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
912 if (s == 0) {
913 i->setSrc(0, i->getSrc(1));
914 i->src(0).mod = i->src(1).mod;
915 }
916 i->setSrc(1, i->getSrc(2));
917 i->src(1).mod = i->src(2).mod;
918 i->setSrc(2, NULL);
919 i->op = OP_ADD;
920 }
921 break;
922 case OP_ADD:
923 if (i->usesFlags())
924 break;
925 if (imm0.isInteger(0)) {
926 if (s == 0) {
927 i->setSrc(0, i->getSrc(1));
928 i->src(0).mod = i->src(1).mod;
929 }
930 i->setSrc(1, NULL);
931 i->op = i->src(0).mod.getOp();
932 if (i->op != OP_CVT)
933 i->src(0).mod = Modifier(0);
934 }
935 break;
936
937 case OP_DIV:
938 if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
939 break;
940 bld.setPosition(i, false);
941 if (imm0.reg.data.u32 == 0) {
942 break;
943 } else
944 if (imm0.reg.data.u32 == 1) {
945 i->op = OP_MOV;
946 i->setSrc(1, NULL);
947 } else
948 if (i->dType == TYPE_U32 && imm0.isPow2()) {
949 i->op = OP_SHR;
950 i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
951 } else
952 if (i->dType == TYPE_U32) {
953 Instruction *mul;
954 Value *tA, *tB;
955 const uint32_t d = imm0.reg.data.u32;
956 uint32_t m;
957 int r, s;
958 uint32_t l = util_logbase2(d);
959 if (((uint32_t)1 << l) < d)
960 ++l;
961 m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
962 r = l ? 1 : 0;
963 s = l ? (l - 1) : 0;
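// Worked example (illustrative), d = 7: l = 3, m = 0x24924925; for x = 100,
// hi = mul.high(x, m) = 14, then (14 + ((100 - 14) >> 1)) >> 2 = 14 = 100 / 7.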
964
965 tA = bld.getSSA();
966 tB = bld.getSSA();
967 mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
968 bld.loadImm(NULL, m));
969 mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
970 bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
971 tA = bld.getSSA();
972 if (r)
973 bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
974 else
975 tA = tB;
976 tB = s ? bld.getSSA() : i->getDef(0);
977 newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
978 if (s)
979 bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));
980
981 delete_Instruction(prog, i);
982 } else
983 if (imm0.reg.data.s32 == -1) {
984 i->op = OP_NEG;
985 i->setSrc(1, NULL);
986 } else {
987 LValue *tA, *tB;
988 LValue *tD;
989 const int32_t d = imm0.reg.data.s32;
990 int32_t m;
991 int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
992 if ((1 << l) < abs(d))
993 ++l;
994 if (!l)
995 l = 1;
996 m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);
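// e.g. (illustrative) d = 7: l = 3, m = 2^34 / 7 + 1 - 2^32 = 0x92492493
// (-1840700269 as s32), followed by an arithmetic shift by l - 1 = 2.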
997
998 tA = bld.getSSA();
999 tB = bld.getSSA();
1000 bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
1001 i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
1002 if (l > 1)
1003 bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
1004 else
1005 tB = tA;
1006 tA = bld.getSSA();
1007 bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
1008 tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
1009 newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
1010 if (d < 0)
1011 bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);
1012
1013 delete_Instruction(prog, i);
1014 }
1015 break;
1016
1017 case OP_MOD:
1018 if (i->sType == TYPE_U32 && imm0.isPow2()) {
1019 bld.setPosition(i, false);
1020 i->op = OP_AND;
1021 i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
1022 }
1023 break;
1024
1025 case OP_SET: // TODO: SET_AND,OR,XOR
1026 {
1027 /* This optimizes the case where the output of a set is being compared
1028 * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
1029 * can be a lot cleverer in our comparison.
1030 */
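// e.g. (illustrative): b = set f32 lt x, y;  r = set u32 eq b, 0
// becomes r = set ge x, y, and the first SET is left for DCE.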
1031 CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
1032 CondCode cc, ccZ;
1033 if (imm0.reg.data.u32 != 0 || !si)
1034 return;
1035 cc = si->setCond;
1036 ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
1037 // We do everything assuming var (cmp) 0, reverse the condition if 0 is
1038 // first.
1039 if (s == 0)
1040 ccZ = reverseCondCode(ccZ);
1041 // If there is a negative modifier, we need to undo that, by flipping
1042 // the comparison to zero.
1043 if (i->src(t).mod.neg())
1044 ccZ = reverseCondCode(ccZ);
1045 // If this is a signed comparison, we expect the input to be a regular
1046 // boolean, i.e. 0/-1. However the rest of the logic assumes that true
1047 // is positive, so just flip the sign.
1048 if (i->sType == TYPE_S32) {
1049 assert(!isFloatType(si->dType));
1050 ccZ = reverseCondCode(ccZ);
1051 }
1052 switch (ccZ) {
1053 case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
1054 case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
1055 case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
1056 case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
1057 case CC_GT: break; // bool > 0 -- bool
1058 case CC_NE: break; // bool != 0 -- bool
1059 default:
1060 return;
1061 }
1062
1063 // Update the condition of this SET to be identical to the origin set,
1064 // but with the updated condition code. The original SET should get
1065 // DCE'd, ideally.
1066 i->op = si->op;
1067 i->asCmp()->setCond = cc;
1068 i->setSrc(0, si->src(0));
1069 i->setSrc(1, si->src(1));
1070 if (si->srcExists(2))
1071 i->setSrc(2, si->src(2));
1072 i->sType = si->sType;
1073 }
1074 break;
1075
1076 case OP_AND:
1077 {
1078 Instruction *src = i->getSrc(t)->getInsn();
1079 ImmediateValue imm1;
1080 if (imm0.reg.data.u32 == 0) {
1081 i->op = OP_MOV;
1082 i->setSrc(0, new_ImmediateValue(prog, 0u));
1083 i->src(0).mod = Modifier(0);
1084 i->setSrc(1, NULL);
1085 } else if (imm0.reg.data.u32 == ~0U) {
1086 i->op = i->src(t).mod.getOp();
1087 if (t) {
1088 i->setSrc(0, i->getSrc(t));
1089 i->src(0).mod = i->src(t).mod;
1090 }
1091 i->setSrc(1, NULL);
1092 } else if (src && src->asCmp()) {
1093 CmpInstruction *cmp = src->asCmp();
1094 if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
1095 return;
1096 if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
1097 return;
1098 if (imm0.reg.data.f32 != 1.0)
1099 return;
1100 if (cmp->dType != TYPE_U32)
1101 return;
1102
1103 cmp->dType = TYPE_F32;
1104 if (i->src(t).mod != Modifier(0)) {
1105 assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
1106 i->src(t).mod = Modifier(0);
1107 cmp->setCond = inverseCondCode(cmp->setCond);
1108 }
1109 i->op = OP_MOV;
1110 i->setSrc(s, NULL);
1111 if (t) {
1112 i->setSrc(0, i->getSrc(t));
1113 i->setSrc(t, NULL);
1114 }
1115 } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
1116 src && src->op == OP_SHR &&
1117 src->src(1).getImmediate(imm1) &&
1118 i->src(t).mod == Modifier(0) &&
1119 util_is_power_of_two(imm0.reg.data.u32 + 1)) {
1120 // low byte = offset, high byte = width
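// e.g. (illustrative): (x >> 8) & 0xff  ->  extbf x, 0x808 (width 8, offset 8)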
1121 uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
1122 i->op = OP_EXTBF;
1123 i->setSrc(0, src->getSrc(0));
1124 i->setSrc(1, new_ImmediateValue(prog, ext));
1125 }
1126 }
1127 break;
1128
1129 case OP_SHL:
1130 {
1131 if (s != 1 || i->src(0).mod != Modifier(0))
1132 break;
1133 // try to concatenate shifts
1134 Instruction *si = i->getSrc(0)->getInsn();
1135 if (!si || si->op != OP_SHL)
1136 break;
1137 ImmediateValue imm1;
1138 if (si->src(1).getImmediate(imm1)) {
1139 bld.setPosition(i, false);
1140 i->setSrc(0, si->getSrc(0));
1141 i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
1142 }
1143 }
1144 break;
1145
1146 case OP_ABS:
1147 case OP_NEG:
1148 case OP_SAT:
1149 case OP_LG2:
1150 case OP_RCP:
1151 case OP_SQRT:
1152 case OP_RSQ:
1153 case OP_PRESIN:
1154 case OP_SIN:
1155 case OP_COS:
1156 case OP_PREEX2:
1157 case OP_EX2:
1158 unary(i, imm0);
1159 break;
1160 case OP_BFIND: {
1161 int32_t res;
1162 switch (i->dType) {
1163 case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
1164 case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
1165 default:
1166 return;
1167 }
1168 if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
1169 res = 31 - res;
1170 bld.setPosition(i, false); /* make sure bld is init'ed */
1171 i->setSrc(0, bld.mkImm(res));
1172 i->setSrc(1, NULL);
1173 i->op = OP_MOV;
1174 i->subOp = 0;
1175 break;
1176 }
1177 case OP_POPCNT: {
1178 // Only deal with 1-arg POPCNT here
1179 if (i->srcExists(1))
1180 break;
1181 uint32_t res = util_bitcount(imm0.reg.data.u32);
1182 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
1183 i->setSrc(1, NULL);
1184 i->op = OP_MOV;
1185 break;
1186 }
1187 case OP_CVT: {
1188 Storage res;
1189
1190 // TODO: handle 64-bit values properly
1191 if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
1192 return;
1193
1194 // TODO: handle single byte/word extractions
1195 if (i->subOp)
1196 return;
1197
1198 bld.setPosition(i, true); /* make sure bld is init'ed */
1199
1200 #define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
1201 case type: \
1202 switch (i->sType) { \
1203 case TYPE_F64: \
1204 res.data.dst = util_iround(i->saturate ? \
1205 CLAMP(imm0.reg.data.f64, fmin, fmax) : \
1206 imm0.reg.data.f64); \
1207 break; \
1208 case TYPE_F32: \
1209 res.data.dst = util_iround(i->saturate ? \
1210 CLAMP(imm0.reg.data.f32, fmin, fmax) : \
1211 imm0.reg.data.f32); \
1212 break; \
1213 case TYPE_S32: \
1214 res.data.dst = i->saturate ? \
1215 CLAMP(imm0.reg.data.s32, imin, imax) : \
1216 imm0.reg.data.s32; \
1217 break; \
1218 case TYPE_U32: \
1219 res.data.dst = i->saturate ? \
1220 CLAMP(imm0.reg.data.u32, umin, umax) : \
1221 imm0.reg.data.u32; \
1222 break; \
1223 case TYPE_S16: \
1224 res.data.dst = i->saturate ? \
1225 CLAMP(imm0.reg.data.s16, imin, imax) : \
1226 imm0.reg.data.s16; \
1227 break; \
1228 case TYPE_U16: \
1229 res.data.dst = i->saturate ? \
1230 CLAMP(imm0.reg.data.u16, umin, umax) : \
1231 imm0.reg.data.u16; \
1232 break; \
1233 default: return; \
1234 } \
1235 i->setSrc(0, bld.mkImm(res.data.dst)); \
1236 break
1237
1238 switch(i->dType) {
1239 CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
1240 CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
1241 CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
1242 CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
1243 case TYPE_F32:
1244 switch (i->sType) {
1245 case TYPE_F64:
1246 res.data.f32 = i->saturate ?
1247 CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
1248 imm0.reg.data.f64;
1249 break;
1250 case TYPE_F32:
1251 res.data.f32 = i->saturate ?
1252 CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
1253 imm0.reg.data.f32;
1254 break;
1255 case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
1256 case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
1257 case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
1258 case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
1259 default:
1260 return;
1261 }
1262 i->setSrc(0, bld.mkImm(res.data.f32));
1263 break;
1264 case TYPE_F64:
1265 switch (i->sType) {
1266 case TYPE_F64:
1267 res.data.f64 = i->saturate ?
1268 CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
1269 imm0.reg.data.f64;
1270 break;
1271 case TYPE_F32:
1272 res.data.f64 = i->saturate ?
1273 CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
1274 imm0.reg.data.f32;
1275 break;
1276 case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
1277 case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
1278 case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
1279 case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
1280 default:
1281 return;
1282 }
1283 i->setSrc(0, bld.mkImm(res.data.f64));
1284 break;
1285 default:
1286 return;
1287 }
1288 #undef CASE
1289
1290 i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
1291 i->op = OP_MOV;
1292 i->saturate = 0;
1293 i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
1294 break;
1295 }
1296 default:
1297 return;
1298 }
1299 if (newi->op != op)
1300 foldCount++;
1301 }
1302
1303 // =============================================================================
1304
1305 // Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
1306 class ModifierFolding : public Pass
1307 {
1308 private:
1309 virtual bool visit(BasicBlock *);
1310 };
1311
1312 bool
1313 ModifierFolding::visit(BasicBlock *bb)
1314 {
1315 const Target *target = prog->getTarget();
1316
1317 Instruction *i, *next, *mi;
1318 Modifier mod;
1319
1320 for (i = bb->getEntry(); i; i = next) {
1321 next = i->next;
1322
1323 if (0 && i->op == OP_SUB) {
1324 // turn "sub" into "add neg" (do we really want this ?)
1325 i->op = OP_ADD;
1326 i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
1327 }
1328
1329 for (int s = 0; s < 3 && i->srcExists(s); ++s) {
1330 mi = i->getSrc(s)->getInsn();
1331 if (!mi ||
1332 mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
1333 continue;
1334 if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
1335 if ((i->op != OP_ADD &&
1336 i->op != OP_MUL) ||
1337 (mi->op != OP_ABS &&
1338 mi->op != OP_NEG))
1339 continue;
1340 } else
1341 if (i->sType != mi->dType) {
1342 continue;
1343 }
1344 if ((mod = Modifier(mi->op)) == Modifier(0))
1345 continue;
1346 mod *= mi->src(0).mod;
1347
1348 if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
1349 // abs neg [abs] = abs
1350 mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
1351 } else
1352 if ((i->op == OP_NEG) && mod.neg()) {
1353 assert(s == 0);
1354 // neg as both opcode and modifier on same insn is prohibited
1355 // neg neg abs = abs, neg neg = identity
1356 mod = mod & Modifier(~NV50_IR_MOD_NEG);
1357 i->op = mod.getOp();
1358 mod = mod & Modifier(~NV50_IR_MOD_ABS);
1359 if (mod == Modifier(0))
1360 i->op = OP_MOV;
1361 }
1362
1363 if (target->isModSupported(i, s, mod)) {
1364 i->setSrc(s, mi->getSrc(0));
1365 i->src(s).mod *= mod;
1366 }
1367 }
1368
1369 if (i->op == OP_SAT) {
1370 mi = i->getSrc(0)->getInsn();
1371 if (mi &&
1372 mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
1373 mi->saturate = 1;
1374 mi->setDef(0, i->getDef(0));
1375 delete_Instruction(prog, i);
1376 }
1377 }
1378 }
1379
1380 return true;
1381 }
1382
1383 // =============================================================================
1384
1385 // MUL + ADD -> MAD/FMA
1386 // MIN/MAX(a, a) -> a, etc.
1387 // SLCT(a, b, const) -> cc(const) ? a : b
1388 // RCP(RCP(a)) -> a
1389 // MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
1390 class AlgebraicOpt : public Pass
1391 {
1392 private:
1393 virtual bool visit(BasicBlock *);
1394
1395 void handleABS(Instruction *);
1396 bool handleADD(Instruction *);
1397 bool tryADDToMADOrSAD(Instruction *, operation toOp);
1398 void handleMINMAX(Instruction *);
1399 void handleRCP(Instruction *);
1400 void handleSLCT(Instruction *);
1401 void handleLOGOP(Instruction *);
1402 void handleCVT_NEG(Instruction *);
1403 void handleCVT_EXTBF(Instruction *);
1404 void handleSUCLAMP(Instruction *);
1405
1406 BuildUtil bld;
1407 };
1408
1409 void
1410 AlgebraicOpt::handleABS(Instruction *abs)
1411 {
1412 Instruction *sub = abs->getSrc(0)->getInsn();
1413 DataType ty;
1414 if (!sub ||
1415 !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
1416 return;
1417 // expect not to have mods yet, if we do, bail
1418 if (sub->src(0).mod || sub->src(1).mod)
1419 return;
1420 // hidden conversion ?
1421 ty = intTypeToSigned(sub->dType);
1422 if (abs->dType != abs->sType || ty != abs->sType)
1423 return;
1424
1425 if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
1426 sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
1427 sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
1428 return;
1429
1430 Value *src0 = sub->getSrc(0);
1431 Value *src1 = sub->getSrc(1);
1432
1433 if (sub->op == OP_ADD) {
1434 Instruction *neg = sub->getSrc(1)->getInsn();
1435 if (neg && neg->op != OP_NEG) {
1436 neg = sub->getSrc(0)->getInsn();
1437 src0 = sub->getSrc(1);
1438 }
1439 if (!neg || neg->op != OP_NEG ||
1440 neg->dType != neg->sType || neg->sType != ty)
1441 return;
1442 src1 = neg->getSrc(0);
1443 }
1444
1445 // found ABS(SUB)
1446 abs->moveSources(1, 2); // move sources >=1 up by 2
1447 abs->op = OP_SAD;
1448 abs->setType(sub->dType);
1449 abs->setSrc(0, src0);
1450 abs->setSrc(1, src1);
1451 bld.setPosition(abs, false);
1452 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1453 }
1454
1455 bool
1456 AlgebraicOpt::handleADD(Instruction *add)
1457 {
1458 Value *src0 = add->getSrc(0);
1459 Value *src1 = add->getSrc(1);
1460
1461 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1462 return false;
1463
1464 bool changed = false;
1465 if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1466 changed = tryADDToMADOrSAD(add, OP_MAD);
1467 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1468 changed = tryADDToMADOrSAD(add, OP_SAD);
1469 return changed;
1470 }
1471
1472 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1473 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1474 bool
1475 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1476 {
1477 Value *src0 = add->getSrc(0);
1478 Value *src1 = add->getSrc(1);
1479 Value *src;
1480 int s;
1481 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1482 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1483 Modifier mod[4];
1484
1485 if (src0->refCount() == 1 &&
1486 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1487 s = 0;
1488 else
1489 if (src1->refCount() == 1 &&
1490 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1491 s = 1;
1492 else
1493 return false;
1494
1495 if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
1496 (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
1497 return false;
1498
1499 src = add->getSrc(s);
1500
1501 if (src->getInsn()->postFactor)
1502 return false;
1503 if (toOp == OP_SAD) {
1504 ImmediateValue imm;
1505 if (!src->getInsn()->src(2).getImmediate(imm))
1506 return false;
1507 if (!imm.isInteger(0))
1508 return false;
1509 }
1510
1511 mod[0] = add->src(0).mod;
1512 mod[1] = add->src(1).mod;
1513 mod[2] = src->getUniqueInsn()->src(0).mod;
1514 mod[3] = src->getUniqueInsn()->src(1).mod;
1515
1516 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1517 return false;
1518
1519 add->op = toOp;
1520 add->subOp = src->getInsn()->subOp; // potentially mul-high
1521
1522 add->setSrc(2, add->src(s ? 0 : 1));
1523
1524 add->setSrc(0, src->getInsn()->getSrc(0));
1525 add->src(0).mod = mod[2] ^ mod[s];
1526 add->setSrc(1, src->getInsn()->getSrc(1));
1527 add->src(1).mod = mod[3];
1528
1529 return true;
1530 }
1531
1532 void
1533 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1534 {
1535 Value *src0 = minmax->getSrc(0);
1536 Value *src1 = minmax->getSrc(1);
1537
1538 if (src0 != src1 || src0->reg.file != FILE_GPR)
1539 return;
1540 if (minmax->src(0).mod == minmax->src(1).mod) {
1541 if (minmax->def(0).mayReplace(minmax->src(0))) {
1542 minmax->def(0).replace(minmax->src(0), false);
1543 minmax->bb->remove(minmax);
1544 } else {
1545 minmax->op = OP_CVT;
1546 minmax->setSrc(1, NULL);
1547 }
1548 } else {
1549 // TODO:
1550 // min(x, -x) = -abs(x)
1551 // min(x, -abs(x)) = -abs(x)
1552 // min(x, abs(x)) = x
1553 // max(x, -abs(x)) = x
1554 // max(x, abs(x)) = abs(x)
1555 // max(x, -x) = abs(x)
1556 }
1557 }
1558
1559 void
1560 AlgebraicOpt::handleRCP(Instruction *rcp)
1561 {
1562 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1563
1564 if (si && si->op == OP_RCP) {
1565 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1566 rcp->op = mod.getOp();
1567 rcp->setSrc(0, si->getSrc(0));
1568 }
1569 }
1570
1571 void
1572 AlgebraicOpt::handleSLCT(Instruction *slct)
1573 {
1574 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1575 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1576 slct->setSrc(0, slct->getSrc(1));
1577 } else
1578 if (slct->getSrc(0) != slct->getSrc(1)) {
1579 return;
1580 }
1581 slct->op = OP_MOV;
1582 slct->setSrc(1, NULL);
1583 slct->setSrc(2, NULL);
1584 }
1585
1586 void
1587 AlgebraicOpt::handleLOGOP(Instruction *logop)
1588 {
1589 Value *src0 = logop->getSrc(0);
1590 Value *src1 = logop->getSrc(1);
1591
1592 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1593 return;
1594
1595 if (src0 == src1) {
1596 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1597 logop->def(0).mayReplace(logop->src(0))) {
1598 logop->def(0).replace(logop->src(0), false);
1599 delete_Instruction(prog, logop);
1600 }
1601 } else {
1602 // try AND(SET, SET) -> SET_AND(SET)
1603 Instruction *set0 = src0->getInsn();
1604 Instruction *set1 = src1->getInsn();
1605
1606 if (!set0 || set0->fixed || !set1 || set1->fixed)
1607 return;
1608 if (set1->op != OP_SET) {
1609 Instruction *xchg = set0;
1610 set0 = set1;
1611 set1 = xchg;
1612 if (set1->op != OP_SET)
1613 return;
1614 }
1615 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
1616 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
1617 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
1618 return;
1619 if (set0->op != OP_SET &&
1620 set0->op != OP_SET_AND &&
1621 set0->op != OP_SET_OR &&
1622 set0->op != OP_SET_XOR)
1623 return;
1624 if (set0->getDef(0)->refCount() > 1 &&
1625 set1->getDef(0)->refCount() > 1)
1626 return;
1627 if (set0->getPredicate() || set1->getPredicate())
1628 return;
1629 // check that they don't source each other
1630 for (int s = 0; s < 2; ++s)
1631 if (set0->getSrc(s) == set1->getDef(0) ||
1632 set1->getSrc(s) == set0->getDef(0))
1633 return;
1634
1635 set0 = cloneForward(func, set0);
1636 set1 = cloneShallow(func, set1);
1637 logop->bb->insertAfter(logop, set1);
1638 logop->bb->insertAfter(logop, set0);
1639
1640 set0->dType = TYPE_U8;
1641 set0->getDef(0)->reg.file = FILE_PREDICATE;
1642 set0->getDef(0)->reg.size = 1;
1643 set1->setSrc(2, set0->getDef(0));
1644 set1->op = redOp;
1645 set1->setDef(0, logop->getDef(0));
1646 delete_Instruction(prog, logop);
1647 }
1648 }
1649
1650 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1651 // nv50:
1652 // F2I(NEG(I2F(ABS(SET))))
1653 void
1654 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
1655 {
1656 Instruction *insn = cvt->getSrc(0)->getInsn();
1657 if (cvt->sType != TYPE_F32 ||
1658 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
1659 return;
1660 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1661 return;
1662 if (insn->src(0).mod != Modifier(0))
1663 return;
1664 insn = insn->getSrc(0)->getInsn();
1665
1666 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
1667 if (insn && insn->op == OP_CVT &&
1668 insn->dType == TYPE_F32 &&
1669 insn->sType == TYPE_S32) {
1670 insn = insn->getSrc(0)->getInsn();
1671 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
1672 insn->src(0).mod)
1673 return;
1674 insn = insn->getSrc(0)->getInsn();
1675 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
1676 return;
1677 } else
1678 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
1679 return;
1680 }
1681
1682 Instruction *bset = cloneShallow(func, insn);
1683 bset->dType = TYPE_U32;
1684 bset->setDef(0, cvt->getDef(0));
1685 cvt->bb->insertAfter(cvt, bset);
1686 delete_Instruction(prog, cvt);
1687 }
1688
1689 // Some shaders extract packed bytes out of words and convert them to
1690 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
1691 // nv50 for word sizes.
1692 //
1693 // CVT(EXTBF(x, byte/word))
1694 // CVT(AND(bytemask, x))
1695 // CVT(AND(bytemask, SHR(x, 8/16/24)))
1696 // CVT(SHR(x, 16/24))
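// e.g. (illustrative): cvt f32 (and u32 0xff (shr u32 x 8))
// -> cvt f32 u8 x with subOp = 1, i.e. extract byte 1 directly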
1697 void
1698 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
1699 {
1700 Instruction *insn = cvt->getSrc(0)->getInsn();
1701 ImmediateValue imm;
1702 Value *arg = NULL;
1703 unsigned width, offset;
1704 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
1705 return;
1706 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
1707 width = (imm.reg.data.u32 >> 8) & 0xff;
1708 offset = imm.reg.data.u32 & 0xff;
1709 arg = insn->getSrc(0);
1710
1711 if (width != 8 && width != 16)
1712 return;
1713 if (width == 8 && offset & 0x7)
1714 return;
1715 if (width == 16 && offset & 0xf)
1716 return;
1717 } else if (insn->op == OP_AND) {
1718 int s;
1719 if (insn->src(0).getImmediate(imm))
1720 s = 0;
1721 else if (insn->src(1).getImmediate(imm))
1722 s = 1;
1723 else
1724 return;
1725
1726 if (imm.reg.data.u32 == 0xff)
1727 width = 8;
1728 else if (imm.reg.data.u32 == 0xffff)
1729 width = 16;
1730 else
1731 return;
1732
1733 arg = insn->getSrc(!s);
1734 Instruction *shift = arg->getInsn();
1735 offset = 0;
1736 if (shift && shift->op == OP_SHR &&
1737 shift->sType == cvt->sType &&
1738 shift->src(1).getImmediate(imm) &&
1739 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
1740 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
1741 arg = shift->getSrc(0);
1742 offset = imm.reg.data.u32;
1743 }
1744 } else if (insn->op == OP_SHR &&
1745 insn->sType == cvt->sType &&
1746 insn->src(1).getImmediate(imm)) {
1747 arg = insn->getSrc(0);
1748 if (imm.reg.data.u32 == 24) {
1749 width = 8;
1750 offset = 24;
1751 } else if (imm.reg.data.u32 == 16) {
1752 width = 16;
1753 offset = 16;
1754 } else {
1755 return;
1756 }
1757 }
1758
1759 if (!arg)
1760 return;
1761
1762 // Irrespective of what came earlier, we can undo a shift on the argument
1763 // by adjusting the offset.
1764 Instruction *shift = arg->getInsn();
1765 if (shift && shift->op == OP_SHL &&
1766 shift->src(1).getImmediate(imm) &&
1767 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
1768 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
1769 imm.reg.data.u32 <= offset) {
1770 arg = shift->getSrc(0);
1771 offset -= imm.reg.data.u32;
1772 }
1773
1774 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
1775 // annoying to detect them.
1776
1777 if (width == 8) {
1778 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
1779 } else {
1780 assert(width == 16);
1781 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
1782 }
1783 cvt->setSrc(0, arg);
1784 cvt->subOp = offset >> 3;
1785 }
1786
1787 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
1788 void
1789 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
1790 {
1791 ImmediateValue imm;
1792 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
1793 int s;
1794 Instruction *add;
1795
1796 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
1797
1798 // look for ADD (TODO: only count references by non-SUCLAMP)
1799 if (insn->getSrc(0)->refCount() > 1)
1800 return;
1801 add = insn->getSrc(0)->getInsn();
1802 if (!add || add->op != OP_ADD ||
1803 (add->dType != TYPE_U32 &&
1804 add->dType != TYPE_S32))
1805 return;
1806
1807 // look for immediate
1808 for (s = 0; s < 2; ++s)
1809 if (add->src(s).getImmediate(imm))
1810 break;
1811 if (s >= 2)
1812 return;
1813 s = s ? 0 : 1;
1814 // determine if immediate fits
1815 val += imm.reg.data.s32;
1816 if (val > 31 || val < -32)
1817 return;
1818 // determine if other addend fits
1819 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
1820 return;
1821
1822 bld.setPosition(insn, false); // make sure bld is init'ed
1823 // replace sources
1824 insn->setSrc(2, bld.mkImm(val));
1825 insn->setSrc(0, add->getSrc(s));
1826 }
1827
1828 bool
1829 AlgebraicOpt::visit(BasicBlock *bb)
1830 {
1831 Instruction *next;
1832 for (Instruction *i = bb->getEntry(); i; i = next) {
1833 next = i->next;
1834 switch (i->op) {
1835 case OP_ABS:
1836 handleABS(i);
1837 break;
1838 case OP_ADD:
1839 handleADD(i);
1840 break;
1841 case OP_RCP:
1842 handleRCP(i);
1843 break;
1844 case OP_MIN:
1845 case OP_MAX:
1846 handleMINMAX(i);
1847 break;
1848 case OP_SLCT:
1849 handleSLCT(i);
1850 break;
1851 case OP_AND:
1852 case OP_OR:
1853 case OP_XOR:
1854 handleLOGOP(i);
1855 break;
1856 case OP_CVT:
1857 handleCVT_NEG(i);
1858 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
1859 handleCVT_EXTBF(i);
1860 break;
1861 case OP_SUCLAMP:
1862 handleSUCLAMP(i);
1863 break;
1864 default:
1865 break;
1866 }
1867 }
1868
1869 return true;
1870 }
1871
1872 // =============================================================================
1873
1874 static inline void
1875 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
1876 {
1877 if (offset != ldst->getSrc(0)->reg.data.offset) {
1878 if (ldst->getSrc(0)->refCount() > 1)
1879 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
1880 ldst->getSrc(0)->reg.data.offset = offset;
1881 }
1882 }
1883
1884 // Combine loads and stores, forward stores to loads where possible.
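// e.g. (illustrative): ld u32 $r0, c0[0x0]; ld u32 $r1, c0[0x4]
// -> ld u64 { $r0 $r1 }, c0[0x0] (combineLd), and a load that follows a
// store to the same address can reuse the stored value (replaceLdFromSt).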
1885 class MemoryOpt : public Pass
1886 {
1887 private:
1888 class Record
1889 {
1890 public:
1891 Record *next;
1892 Instruction *insn;
1893 const Value *rel[2];
1894 const Value *base;
1895 int32_t offset;
1896 int8_t fileIndex;
1897 uint8_t size;
1898 bool locked;
1899 Record *prev;
1900
1901 bool overlaps(const Instruction *ldst) const;
1902
1903 inline void link(Record **);
1904 inline void unlink(Record **);
1905 inline void set(const Instruction *ldst);
1906 };
1907
1908 public:
1909 MemoryOpt();
1910
1911 Record *loads[DATA_FILE_COUNT];
1912 Record *stores[DATA_FILE_COUNT];
1913
1914 MemoryPool recordPool;
1915
1916 private:
1917 virtual bool visit(BasicBlock *);
1918 bool runOpt(BasicBlock *);
1919
1920 Record **getList(const Instruction *);
1921
1922 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
1923
1924 // merge @insn into load/store instruction from @rec
1925 bool combineLd(Record *rec, Instruction *ld);
1926 bool combineSt(Record *rec, Instruction *st);
1927
1928 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
1929 bool replaceLdFromSt(Instruction *ld, Record *stRec);
1930 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
1931
1932 void addRecord(Instruction *ldst);
1933 void purgeRecords(Instruction *const st, DataFile);
1934 void lockStores(Instruction *const ld);
1935 void reset();
1936
1937 private:
1938 Record *prevRecord;
1939 };
1940
1941 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
1942 {
1943 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
1944 loads[i] = NULL;
1945 stores[i] = NULL;
1946 }
1947 prevRecord = NULL;
1948 }
1949
1950 void
1951 MemoryOpt::reset()
1952 {
1953 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
1954 Record *it, *next;
1955 for (it = loads[i]; it; it = next) {
1956 next = it->next;
1957 recordPool.release(it);
1958 }
1959 loads[i] = NULL;
1960 for (it = stores[i]; it; it = next) {
1961 next = it->next;
1962 recordPool.release(it);
1963 }
1964 stores[i] = NULL;
1965 }
1966 }
1967
bool
MemoryOpt::combineLd(Record *rec, Instruction *ld)
{
   int32_t offRc = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeLd = typeSizeof(ld->dType);
   int size = sizeRc + sizeLd;
   int d, j;

   if (!prog->getTarget()->
       isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   // no unaligned loads
   if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
       ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
      return false;

   assert(sizeRc + sizeLd <= 16 && offRc != offLd);

   for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);

   if (offLd < offRc) {
      int sz;
      for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
      // d: nr of definitions in ld
      // j: nr of definitions in rec->insn, move:
      for (d = d + j - 1; j > 0; --j, --d)
         rec->insn->setDef(d, rec->insn->getDef(j - 1));

      if (rec->insn->getSrc(0)->refCount() > 1)
         rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
      rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;

      d = 0;
   } else {
      d = j;
   }
   // move definitions of @ld to @rec->insn
   for (j = 0; sizeLd; ++j, ++d) {
      sizeLd -= ld->getDef(j)->reg.size;
      rec->insn->setDef(d, ld->getDef(j));
   }

   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));

   delete_Instruction(prog, ld);

   return true;
}

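// Sketch of combineSt, the store counterpart: e.g. two adjacent 32-bit
// stores can be fused into one 64-bit store, concatenating the value
// operands of the recorded instruction and of @st in address order
// (again subject to the target's access size and alignment rules).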
bool
MemoryOpt::combineSt(Record *rec, Instruction *st)
{
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int s = sizeSt / 4;
   int size = sizeRc + sizeSt;
   int j, k;
   Value *src[4]; // no modifiers in ValueRef allowed for st
   Value *extra[3];

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   if (size == 8 && MIN2(offRc, offSt) & 0x7)
      return false;

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      }
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= rec->insn->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      }
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
   } else {
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      }
      rec->offset = offSt;
   }
   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);
   rec->insn = st;
   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));
   return true;
}

void
MemoryOpt::Record::set(const Instruction *ldst)
{
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
}

void
MemoryOpt::Record::link(Record **list)
{
   next = *list;
   if (next)
      next->prev = this;
   prev = NULL;
   *list = this;
}

void
MemoryOpt::Record::unlink(Record **list)
{
   if (next)
      next->prev = prev;
   if (prev)
      prev->next = next;
   else
      *list = next;
}

MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
{
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src(0).getFile()];
   return &stores[insn->src(0).getFile()];
}

void
MemoryOpt::addRecord(Instruction *i)
{
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());

   it->link(list);
   it->set(i);
   it->insn = i;
   it->locked = false;
}

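// Look up a previously recorded ld/st whose address range overlaps or
// immediately adjoins @insn's. @isAdj tells the caller which case it is:
// adjacent records are candidates for combining, overlapping ones for
// replacement or forwarding (see runOpt below).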
MemoryOpt::Record *
MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
{
   const Symbol *sym = insn->getSrc(0)->asSym();
   const int size = typeSizeof(insn->sType);
   Record *rec = NULL;
   Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];

   for (; it; it = it->next) {
      if (it->locked && insn->op != OP_LOAD)
         continue;
      if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
          it->rel[0] != insn->getIndirect(0, 0) ||
          it->fileIndex != sym->reg.fileIndex ||
          it->rel[1] != insn->getIndirect(0, 1))
         continue;

      if (it->offset < sym->reg.data.offset) {
         if (it->offset + it->size >= sym->reg.data.offset) {
            isAdj = (it->offset + it->size == sym->reg.data.offset);
            if (!isAdj)
               return it;
            if (!(it->offset & 0x7))
               rec = it;
         }
      } else {
         isAdj = it->offset != sym->reg.data.offset;
         if (size <= it->size && !isAdj)
            return it;
         else
         if (!(sym->reg.data.offset & 0x7))
            if (it->offset - size <= sym->reg.data.offset)
               rec = it;
      }
   }
   return rec;
}

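// Store-to-load forwarding. Roughly (illustrative syntax):
//    st u32 l[0x10] %r2
//    ld u32 %r3 l[0x10]
// lets every use of %r3 be rewritten to %r2 and the load be removed,
// provided the store's sources line up exactly and live in GPRs.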
bool
MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
{
   Instruction *st = rec->insn;
   int32_t offSt = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int d, s;

   for (s = 1; offSt != offLd && st->srcExists(s); ++s)
      offSt += st->getSrc(s)->reg.size;
   if (offSt != offLd)
      return false;

   for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
      if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
         return false;
      if (st->getSrc(s)->reg.file != FILE_GPR)
         return false;
      ld->def(d).replace(st->src(s), false);
   }
   ld->bb->remove(ld);
   return true;
}

bool
MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
{
   Instruction *ldR = rec->insn;
   int32_t offR = rec->offset;
   int32_t offE = ldE->getSrc(0)->reg.data.offset;
   int dR, dE;

   assert(offR <= offE);
   for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
      offR += ldR->getDef(dR)->reg.size;
   if (offR != offE)
      return false;

   for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
      if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
         return false;
      ldE->def(dE).replace(ldR->getDef(dR), false);
   }

   delete_Instruction(prog, ldE);
   return true;
}

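// A later store that overlaps an earlier one makes (part of) the earlier
// store redundant: the two are merged into a single wider store, taking
// the newer values wherever the ranges overlap, and the earlier
// instruction is deleted.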
bool
MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
{
   const Instruction *const ri = rec->insn;
   Value *extra[3];

   int32_t offS = st->getSrc(0)->reg.data.offset;
   int32_t offR = rec->offset;
   int32_t endS = offS + typeSizeof(st->dType);
   int32_t endR = offR + typeSizeof(ri->dType);

   rec->size = MAX2(endS, endR) - MIN2(offS, offR);

   st->takeExtraSources(0, extra);

   if (offR < offS) {
      Value *vals[10];
      int s, n;
      int k = 0;
      // get non-replaced sources of ri
      for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      n = s;
      // get replaced sources of st
      for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
         vals[k++] = st->getSrc(s);
      // skip replaced sources of ri
      for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
      // get non-replaced sources after values covered by st
      for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      assert((unsigned int)k <= Elements(vals));
      for (s = 0; s < k; ++s)
         st->setSrc(s + 1, vals[s]);
      st->setSrc(0, ri->getSrc(0));
   } else
   if (endR > endS) {
      int j, s;
      for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
      for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
      for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
         st->setSrc(s++, ri->getSrc(j));
   }
   st->putExtraSources(0, extra);

   delete_Instruction(prog, rec->insn);

   rec->insn = st;
   rec->offset = st->getSrc(0)->reg.data.offset;

   st->setType(typeOfSize(rec->size));

   return true;
}

bool
MemoryOpt::Record::overlaps(const Instruction *ldst) const
{
   Record that;
   that.set(ldst);

   if (this->fileIndex != that.fileIndex)
      return false;

   if (this->rel[0] || that.rel[0])
      return this->base == that.base;
   return
      (this->offset < that.offset + that.size) &&
      (this->offset + this->size > that.offset);
}

// Stores that overlap the address of @ld must not be eliminated when a
// later store to the same location appears, and they may no longer be
// merged into later stores.
// The stored value can, however, still be used to determine the value
// returned by future loads.
void
MemoryOpt::lockStores(Instruction *const ld)
{
   for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
      if (!r->locked && r->overlaps(ld))
         r->locked = true;
}

// Prior loads from the location of @st are no longer valid.
// Stores to the location of @st may no longer be used to derive
// the value at it nor be coalesced into later stores.
void
MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
{
   if (st)
      f = st->src(0).getFile();

   for (Record *r = loads[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&loads[f]);

   for (Record *r = stores[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&stores[f]);
}

bool
MemoryOpt::visit(BasicBlock *bb)
{
   bool ret = runOpt(bb);
   // Run again: a single pass cannot combine four 32-bit ld/st into one
   // 128-bit ld/st on targets where 96-bit memory accesses are forbidden.
   if (ret)
      ret = runOpt(bb);
   return ret;
}

bool
MemoryOpt::runOpt(BasicBlock *bb)
{
   Instruction *ldst, *next;
   Record *rec;
   bool isAdjacent = true;

   for (ldst = bb->getEntry(); ldst; ldst = next) {
      bool keep = true;
      bool isLoad = true;
      next = ldst->next;

      if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
         if (ldst->isDead()) {
            // might have been produced by earlier optimization
            delete_Instruction(prog, ldst);
            continue;
         }
      } else
      if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
         isLoad = false;
      } else {
         // TODO: maybe have all fixed ops act as barrier ?
         if (ldst->op == OP_CALL ||
             ldst->op == OP_BAR ||
             ldst->op == OP_MEMBAR) {
            purgeRecords(NULL, FILE_MEMORY_LOCAL);
            purgeRecords(NULL, FILE_MEMORY_GLOBAL);
            purgeRecords(NULL, FILE_MEMORY_SHARED);
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         } else
         if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
            if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
               purgeRecords(NULL, FILE_MEMORY_LOCAL);
               purgeRecords(NULL, FILE_MEMORY_GLOBAL);
               purgeRecords(NULL, FILE_MEMORY_SHARED);
            } else {
               purgeRecords(NULL, ldst->src(0).getFile());
            }
         } else
         if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         }
         continue;
      }
      if (ldst->getPredicate()) // TODO: handle predicated ld/st
         continue;
      if (ldst->perPatch) // TODO: create separate per-patch lists
         continue;

      if (isLoad) {
         DataFile file = ldst->src(0).getFile();

         // if ld l[]/g[] look for previous store to eliminate the reload
         if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
            // TODO: shared memory ?
            rec = findRecord(ldst, false, isAdjacent);
            if (rec && !isAdjacent)
               keep = !replaceLdFromSt(ldst, rec);
         }

         // or look for ld from the same location and replace this one
         rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
         if (rec) {
            if (!isAdjacent)
               keep = !replaceLdFromLd(ldst, rec);
            else
               // or combine a previous load with this one
               keep = !combineLd(rec, ldst);
         }
         if (keep)
            lockStores(ldst);
      } else {
         rec = findRecord(ldst, false, isAdjacent);
         if (rec) {
            if (!isAdjacent)
               keep = !replaceStFromSt(ldst, rec);
            else
               keep = !combineSt(rec, ldst);
         }
         if (keep)
            purgeRecords(ldst, DATA_FILE_COUNT);
      }
      if (keep)
         addRecord(ldst);
   }
   reset();

   return true;
}

// =============================================================================

// Turn control flow into predicated instructions (after register allocation !).
// TODO:
// Could move this to before register allocation on NVC0 and also handle nested
// constructs.
class FlatteningPass : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   bool tryPredicateConditional(BasicBlock *);
   void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
   void tryPropagateBranch(BasicBlock *);
   inline bool isConstantCondition(Value *pred);
   inline bool mayPredicate(const Instruction *, const Value *pred) const;
   inline void removeFlow(Instruction *);
};

bool
FlatteningPass::isConstantCondition(Value *pred)
{
   Instruction *insn = pred->getUniqueInsn();
   assert(insn);
   if (insn->op != OP_SET || insn->srcExists(2))
      return false;

   for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
      Instruction *ld = insn->getSrc(s)->getUniqueInsn();
      DataFile file;
      if (ld) {
         if (ld->op != OP_MOV && ld->op != OP_LOAD)
            return false;
         if (ld->src(0).isIndirect(0))
            return false;
         file = ld->src(0).getFile();
      } else {
         file = insn->src(s).getFile();
         // catch $r63 on NVC0
         if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
            file = FILE_IMMEDIATE;
      }
      if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
         return false;
   }
   return true;
}

void
FlatteningPass::removeFlow(Instruction *insn)
{
   FlowInstruction *term = insn ? insn->asFlow() : NULL;
   if (!term)
      return;
   Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();

   if (term->op == OP_BRA) {
      // TODO: this might get more difficult when we get arbitrary BRAs
      if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
         return;
   } else
   if (term->op != OP_JOIN)
      return;

   Value *pred = term->getPredicate();

   delete_Instruction(prog, term);

   if (pred && pred->refCount() == 0) {
      Instruction *pSet = pred->getUniqueInsn();
      pred->join->reg.data.id = -1; // deallocate
      if (pSet->isDead())
         delete_Instruction(prog, pSet);
   }
}

void
FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
{
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      if (i->isNop())
         continue;
      assert(!i->getPredicate());
      i->setPredicate(cc, pred);
   }
   removeFlow(bb->getExit());
}

bool
FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
{
   if (insn->isPseudo())
      return true;
   // TODO: calls where we don't know which registers are modified

   if (!prog->getTarget()->mayPredicate(insn, pred))
      return false;
   for (int d = 0; insn->defExists(d); ++d)
      if (insn->getDef(d)->equals(pred))
         return false;
   return true;
}

// If we jump to BRA/RET/EXIT, replace the jump with it.
// NOTE: We do not update the CFG anymore here !
//
// TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
//  BB:0
//   @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
//  BB1:
//   bra BB:3
//  BB2:
//   ...
//  BB3:
//   ...
void
FlatteningPass::tryPropagateBranch(BasicBlock *bb)
{
   for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
      BasicBlock *bf = i->asFlow()->target.bb;

      if (bf->getInsnCount() != 1)
         continue;

      FlowInstruction *bra = i->asFlow();
      FlowInstruction *rep = bf->getExit()->asFlow();

      if (!rep || rep->getPredicate())
         continue;
      if (rep->op != OP_BRA &&
          rep->op != OP_JOIN &&
          rep->op != OP_EXIT)
         continue;

      // TODO: If there are multiple branches to @rep, only the first would
      // be replaced, so only remove them after this pass is done ?
      // Also, need to check all incident blocks for fall-through exits and
      // add the branch there.
      bra->op = rep->op;
      bra->target.bb = rep->target.bb;
      if (bf->cfg.incidentCount() == 1)
         bf->remove(rep);
   }
}

bool
FlatteningPass::visit(BasicBlock *bb)
{
   if (tryPredicateConditional(bb))
      return true;

   // try to attach join to previous instruction
   if (prog->getTarget()->hasJoin) {
      Instruction *insn = bb->getExit();
      if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
         insn = insn->prev;
         if (insn && !insn->getPredicate() &&
             !insn->asFlow() &&
             insn->op != OP_TEXBAR &&
             !isTextureOp(insn->op) && // probably just nve4
             !isSurfaceOp(insn->op) && // not confirmed
             insn->op != OP_LINTERP && // probably just nve4
             insn->op != OP_PINTERP && // probably just nve4
             ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
              (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
             !insn->isNop()) {
            insn->join = 1;
            bb->remove(bb->getExit());
            return true;
         }
      }
   }

   tryPropagateBranch(bb);

   return true;
}

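// Roughly: for a small if/else diamond, the instructions of one side are
// predicated on the branch condition and those of the other side on its
// inverse, and the branch/join instructions are removed, so both sides
// execute back to back without control flow.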
bool
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
{
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;
   Instruction *insn;
   unsigned int mask;

   mask = bb->initiatesSimpleConditional();
   if (!mask)
      return false;

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();
   assert(pred);

   if (isConstantCondition(pred))
      limit = 4;

   Graph::EdgeIterator ei = bb->cfg.outgoing();

   if (mask & 1) {
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
            return false;
      if (nL > limit)
         return false; // too long, do a real branch
   }
   ei.next();

   if (mask & 2) {
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
            return false;
      if (nR > limit)
         return false; // too long, do a real branch
   }

   if (bL)
      predicateInstructions(bL, pred, bb->getExit()->cc);
   if (bR)
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

   if (bb->joinAt) {
      bb->remove(bb->joinAt);
      bb->joinAt = NULL;
   }
   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
   }

   return true;
}

// =============================================================================

// Fold Immediate into MAD; must be done after register allocation due to
// constraint SDST == SSRC2
// TODO:
// Does NVC0+ have other situations where this pass makes sense?
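//
// Illustrative sketch, after RA (register numbers made up):
//    mov $r1 0x40000000
//    mad $r0 $r2 $r1 $r0
// becomes
//    mad $r0 $r2 0x40000000 $r0
// which satisfies the encoding because DST and SRC2 share a register.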
class NV50PostRaConstantFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
NV50PostRaConstantFolding::visit(BasicBlock *bb)
{
   Value *vtmp;
   Instruction *def;

   for (Instruction *i = bb->getFirst(); i; i = i->next) {
      switch (i->op) {
      case OP_MAD:
         if (i->def(0).getFile() != FILE_GPR ||
             i->src(0).getFile() != FILE_GPR ||
             i->src(1).getFile() != FILE_GPR ||
             i->src(2).getFile() != FILE_GPR ||
             i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id ||
             !isFloatType(i->dType))
            break;

         if (i->getDef(0)->reg.data.id >= 64 ||
             i->getSrc(0)->reg.data.id >= 64)
            break;

         def = i->getSrc(1)->getInsn();
         if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
            vtmp = i->getSrc(1);
            i->setSrc(1, def->getSrc(0));

            /* There's no post-RA dead code elimination, so do it here
             * XXX: if we add more code-removing post-RA passes, we might
             * want to create a post-RA dead-code elim pass */
            if (vtmp->refCount() == 0)
               delete_Instruction(bb->getProgram(), def);

            break;
         }
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

// Common subexpression elimination. Stupid O(n^2) implementation.
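//
// For example (illustrative), given two identical computations in a block,
//    add f32 %r0 %r1 %r2
//    ...
//    add f32 %r3 %r1 %r2
// uses of %r3 are rewritten to %r0 and the second add is deleted
// (see tryReplace below).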
class LocalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];
};

class GlobalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
Instruction::isActionEqual(const Instruction *that) const
{
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
      return false;
   if (this->cc != that->cc)
      return false;

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
         return false;
   } else
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
         return false;
   } else
   if (this->asFlow()) {
      return false;
   } else {
      if (this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
         return false;
      if (this->postFactor != that->postFactor)
         return false;
   }

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache ||
       this->mask != that->mask)
      return false;

   return true;
}

bool
Instruction::isResultEqual(const Instruction *that) const
{
   unsigned int d, s;

   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)
      return false;

   if (!isActionEqual(that))
      return false;

   if (this->predSrc != that->predSrc)
      return false;

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
         return false;
   }
   if (that->defExists(d))
      return false;

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
         return false;
      if (this->src(s).mod != that->src(s).mod)
         return false;
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
         return false;
   }
   if (that->srcExists(s))
      return false;

   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src(0).getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
         return true;
      case FILE_SHADER_OUTPUT:
         return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
      default:
         return false;
      }
   }

   return true;
}

// pull through common expressions from different in-blocks
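//
// Sketch: if every phi source is computed by an equivalent instruction
// that has no other use, e.g.
//    BB:1: mul f32 %r0 %a %b     BB:2: mul f32 %r1 %a %b
//    BB:3: phi %r2 %r0 %r1
// one copy of the mul is moved into BB:3 to define %r2 directly and the
// phi is deleted (illustrative syntax).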
bool
GlobalCSE::visit(BasicBlock *bb)
{
   Instruction *phi, *next, *ik;
   int s;

   // TODO: maybe do this with OP_UNION, too

   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      next = phi->next;
      if (phi->getSrc(0)->refCount() > 1)
         continue;
      ik = phi->getSrc(0)->getInsn();
      if (!ik)
         continue; // probably a function input
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
            break;
         if (!phi->getSrc(s)->getInsn() ||
             !phi->getSrc(s)->getInsn()->isResultEqual(ik))
            break;
      }
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         ik->bb->remove(ik);
         if (!entry || entry->op != OP_JOIN)
            bb->insertHead(ik);
         else
            bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
      }
   }

   return true;
}

bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())
      return false;

   if (!old->isResultEqual(i))
      return false;

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}

// =============================================================================

// Remove computations of unused values.
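// An instruction is buried when none of its results are used and it has no
// side effects (stores, atomics, exports and the like are kept; see
// Instruction::isDead). Loads that are only partially dead are shrunk or
// split instead, see checkSplitLoad below.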
class DeadCodeElim : public Pass
{
public:
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *next;

   for (Instruction *i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB)
            i->setDef(0, NULL);
      }
   }
   return true;
}

// Each load can go into up to 4 destinations, any of which might potentially
// be dead (i.e. a hole). These can always be split into 2 loads, independent
// of where the holes are. We find the first contiguous region, put it into
// the first load, and then put the second contiguous region into the second
// load. There can be at most 2 contiguous regions.
//
// Note that there are some restrictions, for example it's not possible to do
// a 64-bit load that's not 64-bit aligned, so such a load has to be split
// up. Also hardware doesn't support 96-bit loads, so those also have to be
// split into a 64-bit and 32-bit load.
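//
// For example (illustrative), a 128-bit load whose third component is dead,
//    ld v4 u32 { %r0 %r1 (dead) %r3 } l[0x0]
// becomes a 64-bit load of { %r0 %r1 } at 0x0 and a 32-bit load of %r3 at
// offset 0xc.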
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;

   // Compute address/width for first load
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Scale back the size of the first load until it can be loaded. This
   // typically happens for TYPE_B96 loads.
   while (n1 &&
          !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
                                                typeOfSize(size1))) {
      size1 -= def1[--n1]->reg.size;
      d--;
   }

   // Compute address/width for second load
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         assert(!size2 || !(addr2 & 0x7));
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else if (!n2) {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Make sure that we've processed all the values
   for (; ld1->defExists(d); ++d)
      assert(!(mask & (1 << d)));

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                       \
   if (level >= (l)) {                          \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)     \
         INFO("PEEPHOLE: %s\n", #n);            \
      n pass;                                   \
      if (!pass.f(this))                        \
         return false;                          \
   }

bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(1, MergeSplits, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   if (getTarget()->getChipset() < 0xc0)
      RUN_PASS(2, NV50PostRaConstantFolding, run);

   return true;
}

} // namespace nv50_ir