/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
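// e.g. (illustrative):
//   b = mov a
//   c = add b, x  ->  c = add a, x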
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
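// e.g. (illustrative):
//   lo, hi = split x
//   y = merge lo, hi  ->  uses of y become uses of x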
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmd32Load(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmd32Load(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
      return false;
   return ld->src(0).getFile() == FILE_IMMEDIATE;
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
          (ld->op == OP_VFETCH ||
           (ld->op == OP_LOAD &&
            (ld->src(0).getFile() == FILE_SHADER_INPUT ||
             ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

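// Swap sources so that an operand the target may be able to encode directly
// ends up in the expected slot: constant-buffer and 32-bit immediate loads
// move to src1 (compensating via the condition code for SET/SLCT), attribute
// and shared-memory loads to src0.
// e.g. (illustrative):
//   r = add (ld c0[0x10]), a  ->  r = add a, (ld c0[0x10])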
void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   if (!prog->getTarget()->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   if (isCSpaceLoad(i0)) {
      if (!isCSpaceLoad(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isImmd32Load(i0)) {
      if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn) // the value may have no defining instruction
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = powf(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
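      // src1 encodes the field as (width << 8) | offset; e.g. (illustrative)
      // 0x0810 selects 8 bits starting at bit 16: rshift = 32 - 8 = 24 and
      // lshift = 32 - 8 - 16 = 8, so (a << 8) >> 24 isolates bits [23:16]
      // (with sign extension for TYPE_S32).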
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      i->op = OP_ADD;

      /* Move the immediate to the second arg, otherwise the ADD operation
       * won't be emittable
       */
      i->setSrc(1, i->getSrc(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      ImmediateValue src0;
      if (i->src(0).getImmediate(src0))
         expr(i, src0, *i->getSrc(1)->asImm());
      if (i->saturate && !prog->getTarget()->isSatSupported(i)) {
         bld.setPosition(i, false);
         i->setSrc(1, bld.loadImm(NULL, res.data.u32));
      }
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // mul-high by 1 (signed) yields the sign of the other source,
            // i.e. 0 or -1, which is exactly a compare against 0.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
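            // e.g. (illustrative): mul.high u32 x, 16  ->  x >> (32 - 4)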
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      }
      break;
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (imm0.isInteger(1) || imm0.isInteger(-1)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      }
      break;
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
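         // Division by an unsigned constant becomes a multiply by a magic
         // number plus corrective shifts (the round-up method of Granlund &
         // Montgomery); a minimal sketch with d == 7 (illustrative):
         //   l = 3, m = (2^32 * (2^3 - 7)) / 7 + 1 = 613566757
         //   q = mul.high(x, m), t = (x - q) >> 1, result = (q + t) >> 2
         //   x == 100: q = 14, t = 43, (14 + 43) >> 2 == 14 == 100 / 7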
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
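         // Signed variant: mul-high by a magic number, add back the
         // dividend, shift, then subtract the sign bit; a minimal sketch
         // with d == 3 (illustrative):
         //   l = 2, m = 2^33 / 3 + 1 - 2^32 = -1431655765
         //   q = mul.high(x, m) + x, q >>= l - 1, result = q - (x < 0)
         //   x == 10: q = -4 + 10 = 6, 6 >> 1 = 3, 3 - 0 == 10 / 3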
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
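      // e.g. (illustrative): x % 16 (u32)  ->  x & 0xf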
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
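      // e.g. (illustrative):
      //   b = set ge f32 x, y
      //   r = set ne u32 b, 0  ->  r = set ge f32 x, y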
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src && src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return;
         if (imm0.reg.data.f32 != 1.0)
            return;
         if (cmp->dType != TYPE_U32)
            return;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (src && prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
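         // e.g. (illustrative): (x >> 8) & 0xff  ->  extbf x, 0x808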
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SHL)
         break;
      ImmediateValue imm1;
      if (si->src(1).getImmediate(imm1)) {
         bld.setPosition(i, false);
         i->setSrc(0, si->getSrc(0));
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch (i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      default:
         return;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return;
   }
   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(1).mod = i->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);

   BuildUtil bld;
};

void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->op = OP_SAD;
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
}

bool
AlgebraicOpt::handleADD(Instruction *add)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return false;

   bool changed = false;
   if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_MAD);
   if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_SAD);
   return changed;
}

// ADD(SAD(a,b,0), c) -> SAD(a,b,c)
// ADD(MUL(a,b), c) -> MAD(a,b,c)
bool
AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);
   Value *src;
   int s;
   const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
   const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
   Modifier mod[4];

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
      s = 0;
   else
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
      s = 1;
   else
      return false;

   if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
       (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
      return false;

   src = add->getSrc(s);

   if (src->getInsn()->postFactor)
      return false;
   if (toOp == OP_SAD) {
      ImmediateValue imm;
      if (!src->getInsn()->src(2).getImmediate(imm))
         return false;
      if (!imm.isInteger(0))
         return false;
   }

   mod[0] = add->src(0).mod;
   mod[1] = add->src(1).mod;
   mod[2] = src->getUniqueInsn()->src(0).mod;
   mod[3] = src->getUniqueInsn()->src(1).mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
      return false;

   add->op = toOp;
   add->subOp = src->getInsn()->subOp; // potentially mul-high

   add->setSrc(2, add->src(s ? 0 : 1));

   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src(0).mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src(1).mod = mod[3];

   return true;
}

void
AlgebraicOpt::handleMINMAX(Instruction *minmax)
{
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
      return;
   if (minmax->src(0).mod == minmax->src(1).mod) {
      if (minmax->def(0).mayReplace(minmax->src(0))) {
         minmax->def(0).replace(minmax->src(0), false);
         minmax->bb->remove(minmax);
      } else {
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);
      }
   } else {
      // TODO:
      // min(x, -x) = -abs(x)
      // min(x, -abs(x)) = -abs(x)
      // min(x, abs(x)) = x
      // max(x, -abs(x)) = x
      // max(x, abs(x)) = abs(x)
      // max(x, -x) = abs(x)
   }
}

void
AlgebraicOpt::handleRCP(Instruction *rcp)
{
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src(0).mod * si->src(0).mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
   }
}

void
AlgebraicOpt::handleSLCT(Instruction *slct)
{
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   } else
   if (slct->getSrc(0) != slct->getSrc(1)) {
      return;
   }
   slct->op = OP_MOV;
   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
}

void
AlgebraicOpt::handleLOGOP(Instruction *logop)
{
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0 == src1) {
      if ((logop->op == OP_AND || logop->op == OP_OR) &&
          logop->def(0).mayReplace(logop->src(0))) {
         logop->def(0).replace(logop->src(0), false);
         delete_Instruction(prog, logop);
      }
   } else {
      // try AND(SET, SET) -> SET_AND(SET)
      Instruction *set0 = src0->getInsn();
      Instruction *set1 = src1->getInsn();

      if (!set0 || set0->fixed || !set1 || set1->fixed)
         return;
      if (set1->op != OP_SET) {
         Instruction *xchg = set0;
         set0 = set1;
         set1 = xchg;
         if (set1->op != OP_SET)
            return;
      }
      operation redOp = (logop->op == OP_AND ? OP_SET_AND :
                         logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
      if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
         return;
      if (set0->op != OP_SET &&
          set0->op != OP_SET_AND &&
          set0->op != OP_SET_OR &&
          set0->op != OP_SET_XOR)
         return;
      if (set0->getDef(0)->refCount() > 1 &&
          set1->getDef(0)->refCount() > 1)
         return;
      if (set0->getPredicate() || set1->getPredicate())
         return;
      // check that they don't source each other
      for (int s = 0; s < 2; ++s)
         if (set0->getSrc(s) == set1->getDef(0) ||
             set1->getSrc(s) == set0->getDef(0))
            return;

      set0 = cloneForward(func, set0);
      set1 = cloneShallow(func, set1);
      logop->bb->insertAfter(logop, set1);
      logop->bb->insertAfter(logop, set0);

      set0->dType = TYPE_U8;
      set0->getDef(0)->reg.file = FILE_PREDICATE;
      set0->getDef(0)->reg.size = 1;
      set1->setSrc(2, set0->getDef(0));
      set1->op = redOp;
      set1->setDef(0, logop->getDef(0));
      delete_Instruction(prog, logop);
   }
}

// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
// nv50:
// F2I(NEG(I2F(ABS(SET))))
void
AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
{
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
      return;
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
      return;
   if (insn->src(0).mod != Modifier(0))
      return;
   insn = insn->getSrc(0)->getInsn();

   // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
   if (insn && insn->op == OP_CVT &&
       insn->dType == TYPE_F32 &&
       insn->sType == TYPE_S32) {
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
          insn->src(0).mod)
         return;
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
         return;
   } else
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
      return;
   }

   Instruction *bset = cloneShallow(func, insn);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
}

// Some shaders extract packed bytes out of words and convert them to
// e.g. float. The Fermi+ CVT instruction can extract those directly, as can
// nv50 for word sizes.
//
// CVT(EXTBF(x, byte/word))
// CVT(AND(bytemask, x))
// CVT(AND(bytemask, SHR(x, 8/16/24)))
// CVT(SHR(x, 16/24))
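//
// e.g. (illustrative):
//   f = cvt f32 (and u32 (shr u32 x, 8), 0xff)
//   ->  f = cvt f32 u8 x (with subOp 1, i.e. byte 1 of x)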
void
AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
{
   Instruction *insn = cvt->getSrc(0)->getInsn();
   ImmediateValue imm;
   Value *arg = NULL;
   unsigned width, offset;
   if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
      return;
   if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
      width = (imm.reg.data.u32 >> 8) & 0xff;
      offset = imm.reg.data.u32 & 0xff;
      arg = insn->getSrc(0);

      if (width != 8 && width != 16)
         return;
      if (width == 8 && offset & 0x7)
         return;
      if (width == 16 && offset & 0xf)
         return;
   } else if (insn->op == OP_AND) {
      int s;
      if (insn->src(0).getImmediate(imm))
         s = 0;
      else if (insn->src(1).getImmediate(imm))
         s = 1;
      else
         return;

      if (imm.reg.data.u32 == 0xff)
         width = 8;
      else if (imm.reg.data.u32 == 0xffff)
         width = 16;
      else
         return;

      arg = insn->getSrc(!s);
      Instruction *shift = arg->getInsn();
      offset = 0;
      if (shift && shift->op == OP_SHR &&
          shift->sType == cvt->sType &&
          shift->src(1).getImmediate(imm) &&
          ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
           (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
         arg = shift->getSrc(0);
         offset = imm.reg.data.u32;
      }
   } else if (insn->op == OP_SHR &&
              insn->sType == cvt->sType &&
              insn->src(1).getImmediate(imm)) {
      arg = insn->getSrc(0);
      if (imm.reg.data.u32 == 24) {
         width = 8;
         offset = 24;
      } else if (imm.reg.data.u32 == 16) {
         width = 16;
         offset = 16;
      } else {
         return;
      }
   }

   if (!arg)
      return;

   // Irrespective of what came earlier, we can undo a shift on the argument
   // by adjusting the offset.
   Instruction *shift = arg->getInsn();
   if (shift && shift->op == OP_SHL &&
       shift->src(1).getImmediate(imm) &&
       ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
        (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
       imm.reg.data.u32 <= offset) {
      arg = shift->getSrc(0);
      offset -= imm.reg.data.u32;
   }

   // The unpackSnorm lowering still leaves a few shifts behind, but it's too
   // annoying to detect them.

   if (width == 8) {
      cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
   } else {
      assert(width == 16);
      cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
   }
   cvt->setSrc(0, arg);
   cvt->subOp = offset >> 3;
}

// SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
void
AlgebraicOpt::handleSUCLAMP(Instruction *insn)
{
   ImmediateValue imm;
   int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
   int s;
   Instruction *add;

   assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);

   // look for ADD (TODO: only count references by non-SUCLAMP)
   if (insn->getSrc(0)->refCount() > 1)
      return;
   add = insn->getSrc(0)->getInsn();
   if (!add || add->op != OP_ADD ||
       (add->dType != TYPE_U32 &&
        add->dType != TYPE_S32))
      return;

   // look for immediate
   for (s = 0; s < 2; ++s)
      if (add->src(s).getImmediate(imm))
         break;
   if (s >= 2)
      return;
   s = s ? 0 : 1;
   // determine if immediate fits
   val += imm.reg.data.s32;
   if (val > 31 || val < -32)
      return;
   // determine if other addend fits
   if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
      return;

   bld.setPosition(insn, false); // make sure bld is init'ed
   // replace sources
   insn->setSrc(2, bld.mkImm(val));
   insn->setSrc(0, add->getSrc(s));
}

bool
AlgebraicOpt::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      switch (i->op) {
      case OP_ABS:
         handleABS(i);
         break;
      case OP_ADD:
         handleADD(i);
         break;
      case OP_RCP:
         handleRCP(i);
         break;
      case OP_MIN:
      case OP_MAX:
         handleMINMAX(i);
         break;
      case OP_SLCT:
         handleSLCT(i);
         break;
      case OP_AND:
      case OP_OR:
      case OP_XOR:
         handleLOGOP(i);
         break;
      case OP_CVT:
         handleCVT_NEG(i);
         if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
            handleCVT_EXTBF(i);
         break;
      case OP_SUCLAMP:
         handleSUCLAMP(i);
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

static inline void
updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
{
   if (offset != ldst->getSrc(0)->reg.data.offset) {
      if (ldst->getSrc(0)->refCount() > 1)
         ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
      ldst->getSrc(0)->reg.data.offset = offset;
   }
}

// Combine loads and stores, forward stores to loads where possible.
class MemoryOpt : public Pass
{
private:
   class Record
   {
   public:
      Record *next;
      Instruction *insn;
      const Value *rel[2];
      const Value *base;
      int32_t offset;
      int8_t fileIndex;
      uint8_t size;
      bool locked;
      Record *prev;

      bool overlaps(const Instruction *ldst) const;

      inline void link(Record **);
      inline void unlink(Record **);
      inline void set(const Instruction *ldst);
   };

public:
   MemoryOpt();

   Record *loads[DATA_FILE_COUNT];
   Record *stores[DATA_FILE_COUNT];

   MemoryPool recordPool;

private:
   virtual bool visit(BasicBlock *);
   bool runOpt(BasicBlock *);

   Record **getList(const Instruction *);

   Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;

   // merge @insn into load/store instruction from @rec
   bool combineLd(Record *rec, Instruction *ld);
   bool combineSt(Record *rec, Instruction *st);

   bool replaceLdFromLd(Instruction *ld, Record *ldRec);
   bool replaceLdFromSt(Instruction *ld, Record *stRec);
   bool replaceStFromSt(Instruction *restrict st, Record *stRec);

   void addRecord(Instruction *ldst);
   void purgeRecords(Instruction *const st, DataFile);
   void lockStores(Instruction *const ld);
   void reset();

private:
   Record *prevRecord;
};

MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
{
   for (int i = 0; i < DATA_FILE_COUNT; ++i) {
      loads[i] = NULL;
      stores[i] = NULL;
   }
   prevRecord = NULL;
}

void
MemoryOpt::reset()
{
   for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
      Record *it, *next;
      for (it = loads[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      loads[i] = NULL;
      for (it = stores[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      stores[i] = NULL;
   }
}

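// Merge a load into an adjacent, previously recorded load, widening the
// access; e.g. (illustrative):
//   a = ld u32 c0[0x10]
//   b = ld u32 c0[0x14]  ->  a, b = ld u64 c0[0x10]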
1885 bool
1886 MemoryOpt::combineLd(Record *rec, Instruction *ld)
1887 {
1888 int32_t offRc = rec->offset;
1889 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1890 int sizeRc = rec->size;
1891 int sizeLd = typeSizeof(ld->dType);
1892 int size = sizeRc + sizeLd;
1893 int d, j;
1894
1895 if (!prog->getTarget()->
1896 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
1897 return false;
1898 // no unaligned loads
1899 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
1900 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
1901 return false;
1902
1903 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
1904
1905 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
1906
1907 if (offLd < offRc) {
1908 int sz;
1909 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
1910 // d: nr of definitions in ld
1911 // j: nr of definitions in rec->insn, move:
1912 for (d = d + j - 1; j > 0; --j, --d)
1913 rec->insn->setDef(d, rec->insn->getDef(j - 1));
1914
1915 if (rec->insn->getSrc(0)->refCount() > 1)
1916 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
1917 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
1918
1919 d = 0;
1920 } else {
1921 d = j;
1922 }
1923 // move definitions of @ld to @rec->insn
1924 for (j = 0; sizeLd; ++j, ++d) {
1925 sizeLd -= ld->getDef(j)->reg.size;
1926 rec->insn->setDef(d, ld->getDef(j));
1927 }
1928
1929 rec->size = size;
1930 rec->insn->getSrc(0)->reg.size = size;
1931 rec->insn->setType(typeOfSize(size));
1932
1933 delete_Instruction(prog, ld);
1934
1935 return true;
1936 }
1937
bool
MemoryOpt::combineSt(Record *rec, Instruction *st)
{
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int s = sizeSt / 4;
   int size = sizeRc + sizeSt;
   int j, k;
   Value *src[4]; // no modifiers in ValueRef allowed for st
   Value *extra[3];

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   if (size == 8 && MIN2(offRc, offSt) & 0x7)
      return false;

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      }
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= rec->insn->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      }
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
   } else {
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      }
      rec->offset = offSt;
   }
   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);

   rec->insn = st;
   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));
   return true;
}

void
MemoryOpt::Record::set(const Instruction *ldst)
{
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
}

void
MemoryOpt::Record::link(Record **list)
{
   next = *list;
   if (next)
      next->prev = this;
   prev = NULL;
   *list = this;
}

void
MemoryOpt::Record::unlink(Record **list)
{
   if (next)
      next->prev = prev;
   if (prev)
      prev->next = next;
   else
      *list = next;
}

MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
{
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src(0).getFile()];
   return &stores[insn->src(0).getFile()];
}

void
MemoryOpt::addRecord(Instruction *i)
{
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());

   it->link(list);
   it->set(i);
   it->insn = i;
   it->locked = false;
}

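// Look through the load (@load == true) or store records of @insn's file for
// an access whose range overlaps or immediately adjoins @insn's. @isAdj is
// set to true when the returned record is merely adjacent (a candidate for
// combining) and false when it overlaps (a candidate for replacement).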
MemoryOpt::Record *
MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
{
   const Symbol *sym = insn->getSrc(0)->asSym();
   const int size = typeSizeof(insn->sType);
   Record *rec = NULL;
   Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];

   for (; it; it = it->next) {
      if (it->locked && insn->op != OP_LOAD)
         continue;
      if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
          it->rel[0] != insn->getIndirect(0, 0) ||
          it->fileIndex != sym->reg.fileIndex ||
          it->rel[1] != insn->getIndirect(0, 1))
         continue;

      if (it->offset < sym->reg.data.offset) {
         if (it->offset + it->size >= sym->reg.data.offset) {
            isAdj = (it->offset + it->size == sym->reg.data.offset);
            if (!isAdj)
               return it;
            if (!(it->offset & 0x7))
               rec = it;
         }
      } else {
         isAdj = it->offset != sym->reg.data.offset;
         if (size <= it->size && !isAdj)
            return it;
         else
         if (!(sym->reg.data.offset & 0x7))
            if (it->offset - size <= sym->reg.data.offset)
               rec = it;
      }
   }
   return rec;
}

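// Store-to-load forwarding: if the store in @rec covers the range read by
// @ld with matching per-component sizes and GPR sources, rewire @ld's defs
// to the stored values and remove the load.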
bool
MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
{
   Instruction *st = rec->insn;
   int32_t offSt = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int d, s;

   for (s = 1; offSt != offLd && st->srcExists(s); ++s)
      offSt += st->getSrc(s)->reg.size;
   if (offSt != offLd)
      return false;

   for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
      if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
         return false;
      if (st->getSrc(s)->reg.file != FILE_GPR)
         return false;
      ld->def(d).replace(st->src(s), false);
   }
   ld->bb->remove(ld);
   return true;
}

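// Load-to-load forwarding: the earlier load in @rec already provides the
// values read by @ldE, so redirect @ldE's defs to them and delete @ldE.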
bool
MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
{
   Instruction *ldR = rec->insn;
   int32_t offR = rec->offset;
   int32_t offE = ldE->getSrc(0)->reg.data.offset;
   int dR, dE;

   assert(offR <= offE);
   for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
      offR += ldR->getDef(dR)->reg.size;
   if (offR != offE)
      return false;

   for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
      if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
         return false;
      ldE->def(dE).replace(ldR->getDef(dR), false);
   }

   delete_Instruction(prog, ldE);
   return true;
}

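// Merge an overlapping earlier store into @st: @st's values win for the
// overlapped range, the record's values are kept for whatever lies outside
// it, so @st ends up writing the union of both ranges.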
bool
MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
{
   const Instruction *const ri = rec->insn;
   Value *extra[3];

   int32_t offS = st->getSrc(0)->reg.data.offset;
   int32_t offR = rec->offset;
   int32_t endS = offS + typeSizeof(st->dType);
   int32_t endR = offR + typeSizeof(ri->dType);

   rec->size = MAX2(endS, endR) - MIN2(offS, offR);

   st->takeExtraSources(0, extra);

   if (offR < offS) {
      Value *vals[10];
      int s, n;
      int k = 0;
      // get non-replaced sources of ri
      for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      n = s;
      // get replaced sources of st
      for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
         vals[k++] = st->getSrc(s);
      // skip replaced sources of ri
      for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
      // get non-replaced sources after values covered by st
      for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
         vals[k++] = ri->getSrc(s);
      assert((unsigned int)k <= Elements(vals));
      for (s = 0; s < k; ++s)
         st->setSrc(s + 1, vals[s]);
      st->setSrc(0, ri->getSrc(0));
   } else
   if (endR > endS) {
      int j, s;
      for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
      for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
      for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
         st->setSrc(s++, ri->getSrc(j));
   }
   st->putExtraSources(0, extra);

   delete_Instruction(prog, rec->insn);

   rec->insn = st;
   rec->offset = st->getSrc(0)->reg.data.offset;

   st->setType(typeOfSize(rec->size));

   return true;
}

bool
MemoryOpt::Record::overlaps(const Instruction *ldst) const
{
   Record that;
   that.set(ldst);

   if (this->fileIndex != that.fileIndex)
      return false;

   if (this->rel[0] || that.rel[0])
      return this->base == that.base;
   return
      (this->offset < that.offset + that.size) &&
      (this->offset + this->size > that.offset);
}

// Stores that affect the result of @ld must not be eliminated when we later
// find stores to the same location, and they may no longer be merged with
// later stores.
// The stored value can, however, still be used to determine the value
// returned by future loads.
void
MemoryOpt::lockStores(Instruction *const ld)
{
   for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
      if (!r->locked && r->overlaps(ld))
         r->locked = true;
}

// Prior loads from the location of @st are no longer valid.
// Stores to the location of @st may no longer be used to derive
// the value at it nor be coalesced into later stores.
void
MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
{
   if (st)
      f = st->src(0).getFile();

   for (Record *r = loads[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&loads[f]);

   for (Record *r = stores[f]; r; r = r->next)
      if (!st || r->overlaps(st))
         r->unlink(&stores[f]);
}

bool
MemoryOpt::visit(BasicBlock *bb)
{
   bool ret = runOpt(bb);
   // Run again: a single pass cannot combine four 32-bit ld/st into one
   // 128-bit ld/st where 96-bit memory operations are forbidden.
   if (ret)
      ret = runOpt(bb);
   return ret;
}

bool
MemoryOpt::runOpt(BasicBlock *bb)
{
   Instruction *ldst, *next;
   Record *rec;
   bool isAdjacent = true;

   for (ldst = bb->getEntry(); ldst; ldst = next) {
      bool keep = true;
      bool isLoad = true;
      next = ldst->next;

      if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
         if (ldst->isDead()) {
            // might have been produced by an earlier optimization
            delete_Instruction(prog, ldst);
            continue;
         }
      } else
      if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
         isLoad = false;
      } else {
         // TODO: maybe have all fixed ops act as barrier?
         if (ldst->op == OP_CALL ||
             ldst->op == OP_BAR ||
             ldst->op == OP_MEMBAR) {
            purgeRecords(NULL, FILE_MEMORY_LOCAL);
            purgeRecords(NULL, FILE_MEMORY_GLOBAL);
            purgeRecords(NULL, FILE_MEMORY_SHARED);
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         } else
         if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
            if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
               purgeRecords(NULL, FILE_MEMORY_LOCAL);
               purgeRecords(NULL, FILE_MEMORY_GLOBAL);
               purgeRecords(NULL, FILE_MEMORY_SHARED);
            } else {
               purgeRecords(NULL, ldst->src(0).getFile());
            }
         } else
         if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
            purgeRecords(NULL, FILE_SHADER_OUTPUT);
         }
         continue;
      }
      if (ldst->getPredicate()) // TODO: handle predicated ld/st
         continue;
      if (ldst->perPatch) // TODO: create separate per-patch lists
         continue;

      if (isLoad) {
         DataFile file = ldst->src(0).getFile();

         // if loading from l[]/g[], look for a previous store to eliminate
         // the reload
         if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
            // TODO: shared memory?
            rec = findRecord(ldst, false, isAdjacent);
            if (rec && !isAdjacent)
               keep = !replaceLdFromSt(ldst, rec);
         }

         // or look for a load from the same location and replace this one
         rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
         if (rec) {
            if (!isAdjacent)
               keep = !replaceLdFromLd(ldst, rec);
            else
               // or combine a previous load with this one
               keep = !combineLd(rec, ldst);
         }
         if (keep)
            lockStores(ldst);
      } else {
         rec = findRecord(ldst, false, isAdjacent);
         if (rec) {
            if (!isAdjacent)
               keep = !replaceStFromSt(ldst, rec);
            else
               keep = !combineSt(rec, ldst);
         }
         if (keep)
            purgeRecords(ldst, DATA_FILE_COUNT);
      }
      if (keep)
         addRecord(ldst);
   }
   reset();

   return true;
}

// =============================================================================

// Turn control flow into predicated instructions (after register allocation!).
// TODO:
// Could move this to before register allocation on NVC0 and also handle nested
// constructs.
class FlatteningPass : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   bool tryPredicateConditional(BasicBlock *);
   void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
   void tryPropagateBranch(BasicBlock *);
   inline bool isConstantCondition(Value *pred);
   inline bool mayPredicate(const Instruction *, const Value *pred) const;
   inline void removeFlow(Instruction *);
};

bool
FlatteningPass::isConstantCondition(Value *pred)
{
   Instruction *insn = pred->getUniqueInsn();
   assert(insn);
   if (insn->op != OP_SET || insn->srcExists(2))
      return false;

   for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
      Instruction *ld = insn->getSrc(s)->getUniqueInsn();
      DataFile file;
      if (ld) {
         if (ld->op != OP_MOV && ld->op != OP_LOAD)
            return false;
         if (ld->src(0).isIndirect(0))
            return false;
         file = ld->src(0).getFile();
      } else {
         file = insn->src(s).getFile();
         // catch $r63 on NVC0
         if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
            file = FILE_IMMEDIATE;
      }
      if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
         return false;
   }
   return true;
}

void
FlatteningPass::removeFlow(Instruction *insn)
{
   FlowInstruction *term = insn ? insn->asFlow() : NULL;
   if (!term)
      return;
   Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();

   if (term->op == OP_BRA) {
      // TODO: this might get more difficult when we get arbitrary BRAs
      if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
         return;
   } else
   if (term->op != OP_JOIN)
      return;

   Value *pred = term->getPredicate();

   delete_Instruction(prog, term);

   if (pred && pred->refCount() == 0) {
      Instruction *pSet = pred->getUniqueInsn();
      pred->join->reg.data.id = -1; // deallocate
      if (pSet->isDead())
         delete_Instruction(prog, pSet);
   }
}

void
FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
{
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      if (i->isNop())
         continue;
      assert(!i->getPredicate());
      i->setPredicate(cc, pred);
   }
   removeFlow(bb->getExit());
}

bool
FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
{
   if (insn->isPseudo())
      return true;
   // TODO: calls where we don't know which registers are modified

   if (!prog->getTarget()->mayPredicate(insn, pred))
      return false;
   for (int d = 0; insn->defExists(d); ++d)
      if (insn->getDef(d)->equals(pred))
         return false;
   return true;
}

// If we jump to BRA/RET/EXIT, replace the jump with it.
// NOTE: We no longer update the CFG here!
//
// TODO: Handle cases where we skip over a branch (maybe do that elsewhere?):
// BB:0
// @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
// BB:1
// bra BB:3
// BB:2
// ...
// BB:3
// ...
void
FlatteningPass::tryPropagateBranch(BasicBlock *bb)
{
   for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
      BasicBlock *bf = i->asFlow()->target.bb;

      if (bf->getInsnCount() != 1)
         continue;

      FlowInstruction *bra = i->asFlow();
      FlowInstruction *rep = bf->getExit()->asFlow();

      if (!rep || rep->getPredicate())
         continue;
      if (rep->op != OP_BRA &&
          rep->op != OP_JOIN &&
          rep->op != OP_EXIT)
         continue;

      // TODO: If there are multiple branches to @rep, only the first would
      // be replaced, so only remove them after this pass is done?
      // Also, we need to check all incident blocks for fall-through exits
      // and add the branch there.
      bra->op = rep->op;
      bra->target.bb = rep->target.bb;
      if (bf->cfg.incidentCount() == 1)
         bf->remove(rep);
   }
}

bool
FlatteningPass::visit(BasicBlock *bb)
{
   if (tryPredicateConditional(bb))
      return true;

   // try to attach join to previous instruction
   if (prog->getTarget()->hasJoin) {
      Instruction *insn = bb->getExit();
      if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
         insn = insn->prev;
         if (insn && !insn->getPredicate() &&
             !insn->asFlow() &&
             insn->op != OP_TEXBAR &&
             !isTextureOp(insn->op) && // probably just nve4
             !isSurfaceOp(insn->op) && // not confirmed
             insn->op != OP_LINTERP && // probably just nve4
             insn->op != OP_PINTERP && // probably just nve4
             ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
              (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
             !insn->isNop()) {
            insn->join = 1;
            bb->remove(bb->getExit());
            return true;
         }
      }
   }

   tryPropagateBranch(bb);

   return true;
}

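// Try to replace a simple two-sided conditional by predicated execution of
// both sides: every instruction must be predicable and each side must stay
// under a size limit (stricter if the condition is constant, presumably
// because such a branch would be cheap to keep).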
bool
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
{
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;
   Instruction *insn;
   unsigned int mask;

   mask = bb->initiatesSimpleConditional();
   if (!mask)
      return false;

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();
   assert(pred);

   if (isConstantCondition(pred))
      limit = 4;

   Graph::EdgeIterator ei = bb->cfg.outgoing();

   if (mask & 1) {
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
            return false;
      if (nL > limit)
         return false; // too long, do a real branch
   }
   ei.next();

   if (mask & 2) {
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
            return false;
      if (nR > limit)
         return false; // too long, do a real branch
   }

   if (bL)
      predicateInstructions(bL, pred, bb->getExit()->cc);
   if (bR)
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

   if (bb->joinAt) {
      bb->remove(bb->joinAt);
      bb->joinAt = NULL;
   }
   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
   }

   return true;
}

// =============================================================================

// Fold immediates into MAD; must be done after register allocation due to
// the constraint SDST == SSRC2.
// TODO:
// Does NVC0+ have other situations where this pass makes sense?
class NV50PostRaConstantFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

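// Hypothetical illustration of the fold below (register numbers made up,
// notation approximate):
//    mov $r1 0x40400000          <- immediate 3.0f
//    mad $r0 $r2 $r1 $r0
// -> mad $r0 $r2 0x40400000 $r0
// The mov is also deleted here when it has no remaining users, since no
// dead-code elimination runs after register allocation.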
bool
NV50PostRaConstantFolding::visit(BasicBlock *bb)
{
   Value *vtmp;
   Instruction *def;

   for (Instruction *i = bb->getFirst(); i; i = i->next) {
      switch (i->op) {
      case OP_MAD:
         if (i->def(0).getFile() != FILE_GPR ||
             i->src(0).getFile() != FILE_GPR ||
             i->src(1).getFile() != FILE_GPR ||
             i->src(2).getFile() != FILE_GPR ||
             i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id ||
             !isFloatType(i->dType))
            break;

         if (i->getDef(0)->reg.data.id >= 64 ||
             i->getSrc(0)->reg.data.id >= 64)
            break;

         // the source might not have a defining instruction (e.g. an input)
         def = i->getSrc(1)->getInsn();
         if (def && def->op == OP_MOV &&
             def->src(0).getFile() == FILE_IMMEDIATE) {
            vtmp = i->getSrc(1);
            i->setSrc(1, def->getSrc(0));

            /* There's no post-RA dead code elimination, so do it here.
             * XXX: if we add more code-removing post-RA passes, we might
             * want to create a post-RA dead-code elim pass */
            if (vtmp->refCount() == 0)
               delete_Instruction(bb->getProgram(), def);

            break;
         }
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

// Common subexpression elimination. Simple O(n^2) implementation.
class LocalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];
};

class GlobalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

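// An instruction can substitute for another if it performs the same action
// (isActionEqual: same op, types, modifiers, ...) on equal sources and its
// results are interchangeable (isResultEqual, which in addition only accepts
// loads from files that cannot have changed between the two instructions).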
bool
Instruction::isActionEqual(const Instruction *that) const
{
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
      return false;
   if (this->cc != that->cc)
      return false;

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
         return false;
   } else
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
         return false;
   } else
   if (this->asFlow()) {
      return false;
   } else {
      if (this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
         return false;
      if (this->postFactor != that->postFactor)
         return false;
   }

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache ||
       this->mask != that->mask)
      return false;

   return true;
}

bool
Instruction::isResultEqual(const Instruction *that) const
{
   unsigned int d, s;

   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)
      return false;

   if (!isActionEqual(that))
      return false;

   if (this->predSrc != that->predSrc)
      return false;

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
         return false;
   }
   if (that->defExists(d))
      return false;

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
         return false;
      if (this->src(s).mod != that->src(s).mod)
         return false;
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
         return false;
   }
   if (that->srcExists(s))
      return false;

   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src(0).getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
         return true;
      case FILE_SHADER_OUTPUT:
         return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
      default:
         return false;
      }
   }

   return true;
}

// Pull expressions that are common to all in-blocks through into this block.
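// Sketch (invented value names, not actual printed IR): if each predecessor
// computes the same thing and feeds it into a phi,
//    BB:1: %a = add %x %y     BB:2: %b = add %x %y     %c = phi %a %b
// one instance is moved down into this block to define %c directly and the
// phi node is deleted.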
bool
GlobalCSE::visit(BasicBlock *bb)
{
   Instruction *phi, *next, *ik;
   int s;

   // TODO: maybe do this with OP_UNION, too

   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      next = phi->next;
      if (phi->getSrc(0)->refCount() > 1)
         continue;
      ik = phi->getSrc(0)->getInsn();
      if (!ik)
         continue; // probably a function input
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
            break;
         if (!phi->getSrc(s)->getInsn() ||
             !phi->getSrc(s)->getInsn()->isResultEqual(ik))
            break;
      }
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         ik->bb->remove(ik);
         if (!entry || entry->op != OP_JOIN)
            bb->insertHead(ik);
         else
            bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
      }
   }

   return true;
}

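// If @i computes the same result as *@ptr, redirect all uses of *@ptr's
// defs to @i's defs, delete *@ptr and set it to NULL.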
bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())
      return false;

   if (!old->isResultEqual(i))
      return false;

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}

// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
{
public:
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *next;

   for (Instruction *i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB)
            i->setDef(0, NULL);
      }
   }
   return true;
}

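// Split a load whose destination is only partially used into at most two
// smaller loads covering just the live components, with offsets and types
// adjusted accordingly (dead leading/trailing components are skipped).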
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      }
   }

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                    \
   if (level >= (l)) {                       \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)  \
         INFO("PEEPHOLE: %s\n", #n);         \
      n pass;                                \
      if (!pass.f(this))                     \
         return false;                       \
   }

bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(1, MergeSplits, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   if (getTarget()->getChipset() < 0xc0)
      RUN_PASS(2, NV50PostRaConstantFolding, run);

   return true;
}

} // namespace nv50_ir