// src/gallium/drivers/nv50/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nv50_ir.h"
#include "nv50_ir_target.h"
#include "nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
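// E.g. (illustrative):
//    mov b, a
//    add c, b, 1   ->   add c, a, 1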
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmd32Load(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmd32Load(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
      return false;
   return ld->src(0).getFile() == FILE_IMMEDIATE;
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
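   // Move loads into the source slot the target is more likely to fold them
   // from: c[] and immediate loads away from src(0), attribute/shared loads
   // into src(0). For SET/SLCT the condition code is fixed up after the swap.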
   if (!prog->getTarget()->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   if (isCSpaceLoad(i0)) {
      if (!isCSpaceLoad(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isImmd32Load(i0)) {
      if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1;

      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
   }
   return true;
}

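// Illustrative chain findOriginForTestWithZero() walks back through:
//    SET -> NEG -> CVT(F2I), turning a 1.0f/0.0f SET result into -1/0.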
CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();

   while (insn && insn->op != OP_SET) {
      Instruction *next = NULL;
      switch (insn->op) {
      case OP_NEG:
      case OP_ABS:
      case OP_CVT:
         next = insn->getSrc(0)->getInsn();
         if (!next || insn->sType != next->dType)
            return NULL;
         break;
      case OP_MOV:
         next = insn->getSrc(0)->getInsn();
         break;
      default:
         return NULL;
      }
      insn = next;
   }
   return insn ? insn->asCmp() : NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 * b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;

   if (i->op == OP_MAD || i->op == OP_FMA) {
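      // (a * b) is folded into src(0) now; rewrite MAD/FMA a, b, c as
      // ADD c, (a * b) and retry folding in case c is an immediate too.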
      i->op = OP_ADD;

      i->setSrc(1, i->getSrc(0));
      i->src(1).mod = i->src(2).mod;
      i->setSrc(0, i->getSrc(2));
      i->setSrc(2, NULL);

      ImmediateValue src0;
      if (i->src(0).getImmediate(src0))
         expr(i, src0, *i->getSrc(1)->asImm());
   } else {
      i->op = OP_MOV;
   }
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32;
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
         }
         mul1->saturate = mul2->saturate;
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = mul2->getDef(0)->uses.front()->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;

   switch (i->op) {
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else
      if (imm0.isInteger(1) || imm0.isInteger(-1)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (imm0.isInteger(2) || imm0.isInteger(-2)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
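         // x * 2^k -> x << k (integer types, positive immediate only)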
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      }
      break;
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;
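         // Division by an invariant multiplier (round-up variant, cf.
         // Granlund & Montgomery): with l = ceil(log2(d)) and
         //   m = floor(2^32 * (2^l - d) / d) + 1,
         // the quotient is
         //   t = mulhi(m, n);  q = (t + ((n - t) >> 1)) >> (l - 1),
         // which the MUL_HIGH/SUB/SHR/ADD/SHR sequence below emits.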

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);
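         // Signed variant of the same technique (a sketch): with
         //   l = max(ceil(log2(|d|)), 1)
         //   m = floor(2^(32 + l - 1) / |d|) + 1 - 2^32
         // compute q0 = (mulhi(m, n) + n) >> (l - 1), then add 1 if n < 0
         // (done below by subtracting the 0/-1 result of SET), and finally
         // negate the quotient if d < 0.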

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
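      // unsigned x % 2^k is simply x & (2^k - 1)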
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (i->src(t).mod != Modifier(0))
         return;
      if (imm0.reg.data.u32 != 0 || !si || si->op != OP_SET)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
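      // Rewrite (SET(a, b) <ccZ> 0) in terms of the original comparison,
      // treating the SET result as 0 or positive:
      //   < 0  -> never,         >= 0 -> always,
      //   == 0 -> !SET(a, b),    <= 0 -> !SET(a, b),
      //   > 0  ->  SET(a, b),    != 0 ->  SET(a, b)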
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break;
      case CC_GE: cc = CC_TR; break;
      case CC_EQ: cc = inverseCondCode(cc); break;
      case CC_LE: cc = inverseCondCode(cc); break;
      case CC_GT: break;
      case CC_NE: break;
      default:
         return;
      }
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      i->sType = si->sType;
   }
      break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SHL)
         break;
      ImmediateValue imm1;
      if (si->src(1).getImmediate(imm1)) {
         bld.setPosition(i, false);
         i->setSrc(0, si->getSrc(0));
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
      }
   }
      break;

   case OP_ABS:
   case OP_NEG:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   default:
      return;
   }
   if (i->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(1).mod = i->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT(Instruction *);
   void handleSUCLAMP(Instruction *);

   BuildUtil bld;
};

void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }
   // found ABS(SUB): |a - b| can be computed as SAD(a, b, 0)
   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->op = OP_SAD;
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
}

bool
AlgebraicOpt::handleADD(Instruction *add)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return false;

   bool changed = false;
   if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_MAD);
   if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_SAD);
   return changed;
}

// ADD(SAD(a,b,0), c) -> SAD(a,b,c)
// ADD(MUL(a,b), c) -> MAD(a,b,c)
bool
AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);
   Value *src;
   int s;
   const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
   const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
   Modifier mod[4];

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
      s = 0;
   else
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
      s = 1;
   else
      return false;

   if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
       (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
      return false;

   src = add->getSrc(s);

   if (src->getInsn()->postFactor)
      return false;
   if (toOp == OP_SAD) {
      ImmediateValue imm;
      if (!src->getInsn()->src(2).getImmediate(imm))
         return false;
      if (!imm.isInteger(0))
         return false;
   }

   mod[0] = add->src(0).mod;
   mod[1] = add->src(1).mod;
   mod[2] = src->getUniqueInsn()->src(0).mod;
   mod[3] = src->getUniqueInsn()->src(1).mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
      return false;

   add->op = toOp;
   add->subOp = src->getInsn()->subOp; // potentially mul-high

   add->setSrc(2, add->src(s ? 0 : 1));

   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src(0).mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src(1).mod = mod[3];

   return true;
}

void
AlgebraicOpt::handleMINMAX(Instruction *minmax)
{
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
      return;
   if (minmax->src(0).mod == minmax->src(1).mod) {
      if (minmax->def(0).mayReplace(minmax->src(0))) {
         minmax->def(0).replace(minmax->src(0), false);
         minmax->bb->remove(minmax);
      } else {
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);
      }
   } else {
      // TODO:
      // min(x, -x) = -abs(x)
      // min(x, -abs(x)) = -abs(x)
      // min(x, abs(x)) = x
      // max(x, -abs(x)) = x
      // max(x, abs(x)) = abs(x)
      // max(x, -x) = abs(x)
   }
}

void
AlgebraicOpt::handleRCP(Instruction *rcp)
{
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src(0).mod * si->src(0).mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
   }
}

void
AlgebraicOpt::handleSLCT(Instruction *slct)
{
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   } else
   if (slct->getSrc(0) != slct->getSrc(1)) {
      return;
   }
   slct->op = OP_MOV;
   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
}

void
AlgebraicOpt::handleLOGOP(Instruction *logop)
{
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0 == src1) {
      if ((logop->op == OP_AND || logop->op == OP_OR) &&
          logop->def(0).mayReplace(logop->src(0))) {
         logop->def(0).replace(logop->src(0), false);
         delete_Instruction(prog, logop);
      }
   } else {
      // try AND(SET, SET) -> SET_AND(SET)
      Instruction *set0 = src0->getInsn();
      Instruction *set1 = src1->getInsn();

      if (!set0 || set0->fixed || !set1 || set1->fixed)
         return;
      if (set1->op != OP_SET) {
         Instruction *xchg = set0;
         set0 = set1;
         set1 = xchg;
         if (set1->op != OP_SET)
            return;
      }
      operation redOp = (logop->op == OP_AND ? OP_SET_AND :
                         logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
      if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
         return;
      if (set0->op != OP_SET &&
          set0->op != OP_SET_AND &&
          set0->op != OP_SET_OR &&
          set0->op != OP_SET_XOR)
         return;
      if (set0->getDef(0)->refCount() > 1 &&
          set1->getDef(0)->refCount() > 1)
         return;
      if (set0->getPredicate() || set1->getPredicate())
         return;
      // check that they don't source each other
      for (int s = 0; s < 2; ++s)
         if (set0->getSrc(s) == set1->getDef(0) ||
             set1->getSrc(s) == set0->getDef(0))
            return;

      set0 = cloneForward(func, set0);
      set1 = cloneShallow(func, set1);
      logop->bb->insertAfter(logop, set1);
      logop->bb->insertAfter(logop, set0);

      set0->dType = TYPE_U8;
      set0->getDef(0)->reg.file = FILE_PREDICATE;
      set0->getDef(0)->reg.size = 1;
      set1->setSrc(2, set0->getDef(0));
      set1->op = redOp;
      set1->setDef(0, logop->getDef(0));
      delete_Instruction(prog, logop);
   }
}

// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
// nv50:
// F2I(NEG(I2F(ABS(SET))))
void
AlgebraicOpt::handleCVT(Instruction *cvt)
{
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
      return;
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
      return;
   if (insn->src(0).mod != Modifier(0))
      return;
   insn = insn->getSrc(0)->getInsn();

   // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
   if (insn && insn->op == OP_CVT &&
       insn->dType == TYPE_F32 &&
       insn->sType == TYPE_S32) {
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
          insn->src(0).mod)
         return;
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
         return;
   } else
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
      return;
   }

   Instruction *bset = cloneShallow(func, insn);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
}

// SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
void
AlgebraicOpt::handleSUCLAMP(Instruction *insn)
{
   ImmediateValue imm;
   int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
   int s;
   Instruction *add;

   assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);

   // look for ADD (TODO: only count references by non-SUCLAMP)
   if (insn->getSrc(0)->refCount() > 1)
      return;
   add = insn->getSrc(0)->getInsn();
   if (!add || add->op != OP_ADD ||
       (add->dType != TYPE_U32 &&
        add->dType != TYPE_S32))
      return;

   // look for immediate
   for (s = 0; s < 2; ++s)
      if (add->src(s).getImmediate(imm))
         break;
   if (s >= 2)
      return;
   s = s ? 0 : 1;
   // determine if immediate fits
   val += imm.reg.data.s32;
   if (val > 31 || val < -32)
      return;
   // determine if other addend fits
   if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
      return;

   bld.setPosition(insn, false); // make sure bld is init'ed
   // replace sources
   insn->setSrc(2, bld.mkImm(val));
   insn->setSrc(0, add->getSrc(s));
}

bool
AlgebraicOpt::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      switch (i->op) {
      case OP_ABS:
         handleABS(i);
         break;
      case OP_ADD:
         handleADD(i);
         break;
      case OP_RCP:
         handleRCP(i);
         break;
      case OP_MIN:
      case OP_MAX:
         handleMINMAX(i);
         break;
      case OP_SLCT:
         handleSLCT(i);
         break;
      case OP_AND:
      case OP_OR:
      case OP_XOR:
         handleLOGOP(i);
         break;
      case OP_CVT:
         handleCVT(i);
         break;
      case OP_SUCLAMP:
         handleSUCLAMP(i);
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

static inline void
updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
{
   if (offset != ldst->getSrc(0)->reg.data.offset) {
      if (ldst->getSrc(0)->refCount() > 1)
         ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
      ldst->getSrc(0)->reg.data.offset = offset;
   }
}

// Combine loads and stores, forward stores to loads where possible.
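// E.g. two adjacent 32-bit loads such as (illustrative syntax)
//    ld u32 $r0, l[0x10]
//    ld u32 $r1, l[0x14]
// can be merged into a single 64-bit load, and a load that exactly covers
// bytes written by a prior store can take its value from the stored sources.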
class MemoryOpt : public Pass
{
private:
   class Record
   {
   public:
      Record *next;
      Instruction *insn;
      const Value *rel[2];
      const Value *base;
      int32_t offset;
      int8_t fileIndex;
      uint8_t size;
      bool locked;
      Record *prev;

      bool overlaps(const Instruction *ldst) const;

      inline void link(Record **);
      inline void unlink(Record **);
      inline void set(const Instruction *ldst);
   };

public:
   MemoryOpt();

   Record *loads[DATA_FILE_COUNT];
   Record *stores[DATA_FILE_COUNT];

   MemoryPool recordPool;

private:
   virtual bool visit(BasicBlock *);
   bool runOpt(BasicBlock *);

   Record **getList(const Instruction *);

   Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;

   // merge @insn into load/store instruction from @rec
   bool combineLd(Record *rec, Instruction *ld);
   bool combineSt(Record *rec, Instruction *st);

   bool replaceLdFromLd(Instruction *ld, Record *ldRec);
   bool replaceLdFromSt(Instruction *ld, Record *stRec);
   bool replaceStFromSt(Instruction *restrict st, Record *stRec);

   void addRecord(Instruction *ldst);
   void purgeRecords(Instruction *const st, DataFile);
   void lockStores(Instruction *const ld);
   void reset();

private:
   Record *prevRecord;
};

MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
{
   for (int i = 0; i < DATA_FILE_COUNT; ++i) {
      loads[i] = NULL;
      stores[i] = NULL;
   }
   prevRecord = NULL;
}

void
MemoryOpt::reset()
{
   for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
      Record *it, *next;
      for (it = loads[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      loads[i] = NULL;
      for (it = stores[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      stores[i] = NULL;
   }
}

bool
MemoryOpt::combineLd(Record *rec, Instruction *ld)
{
   int32_t offRc = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeLd = typeSizeof(ld->dType);
   int size = sizeRc + sizeLd;
   int d, j;

   if (!prog->getTarget()->
       isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   // no unaligned loads
   if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
       ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
      return false;

   assert(sizeRc + sizeLd <= 16 && offRc != offLd);

   for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);

   if (offLd < offRc) {
      int sz;
      for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
      // d: nr of definitions in ld
      // j: nr of definitions in rec->insn, move:
      for (d = d + j - 1; j > 0; --j, --d)
         rec->insn->setDef(d, rec->insn->getDef(j - 1));

      if (rec->insn->getSrc(0)->refCount() > 1)
         rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
      rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;

      d = 0;
   } else {
      d = j;
   }
   // move definitions of @ld to @rec->insn
   for (j = 0; sizeLd; ++j, ++d) {
      sizeLd -= ld->getDef(j)->reg.size;
      rec->insn->setDef(d, ld->getDef(j));
   }

   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));

   delete_Instruction(prog, ld);

   return true;
}

bool
MemoryOpt::combineSt(Record *rec, Instruction *st)
{
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int s = sizeSt / 4;
   int size = sizeRc + sizeSt;
   int j, k;
   Value *src[4]; // no modifiers in ValueRef allowed for st
   Value *extra[3];

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   if (size == 8 && MIN2(offRc, offSt) & 0x7)
      return false;

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      }
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= rec->insn->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      }
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
   } else {
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      }
      rec->offset = offSt;
   }
   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);
   rec->insn = st;
   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));
   return true;
}

void
MemoryOpt::Record::set(const Instruction *ldst)
{
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
}

void
MemoryOpt::Record::link(Record **list)
{
   next = *list;
   if (next)
      next->prev = this;
   prev = NULL;
   *list = this;
}

void
MemoryOpt::Record::unlink(Record **list)
{
   if (next)
      next->prev = prev;
   if (prev)
      prev->next = next;
   else
      *list = next;
}

MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
{
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src(0).getFile()];
   return &stores[insn->src(0).getFile()];
}

void
MemoryOpt::addRecord(Instruction *i)
{
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());

   it->link(list);
   it->set(i);
   it->insn = i;
   it->locked = false;
}

MemoryOpt::Record *
MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
{
   const Symbol *sym = insn->getSrc(0)->asSym();
   const int size = typeSizeof(insn->sType);
   Record *rec = NULL;
   Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];

   for (; it; it = it->next) {
      if (it->locked && insn->op != OP_LOAD)
         continue;
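      // only records in the same aligned 16-byte window, with the same
      // file index and identical indirect addressing, can match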
1575 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
1576 it->rel[0] != insn->getIndirect(0, 0) ||
1577 it->fileIndex != sym->reg.fileIndex ||
1578 it->rel[1] != insn->getIndirect(0, 1))
1579 continue;
1580
1581 if (it->offset < sym->reg.data.offset) {
1582 if (it->offset + it->size >= sym->reg.data.offset) {
1583 isAdj = (it->offset + it->size == sym->reg.data.offset);
1584 if (!isAdj)
1585 return it;
1586 if (!(it->offset & 0x7))
1587 rec = it;
1588 }
1589 } else {
1590 isAdj = it->offset != sym->reg.data.offset;
1591 if (size <= it->size && !isAdj)
1592 return it;
1593 else
1594 if (!(sym->reg.data.offset & 0x7))
1595 if (it->offset - size <= sym->reg.data.offset)
1596 rec = it;
1597 }
1598 }
1599 return rec;
1600 }
1601
1602 bool
1603 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
1604 {
1605 Instruction *st = rec->insn;
1606 int32_t offSt = rec->offset;
1607 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1608 int d, s;
1609
1610 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
1611 offSt += st->getSrc(s)->reg.size;
1612 if (offSt != offLd)
1613 return false;
1614
1615 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
1616 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
1617 return false;
1618 if (st->getSrc(s)->reg.file != FILE_GPR)
1619 return false;
1620 ld->def(d).replace(st->src(s), false);
1621 }
1622 ld->bb->remove(ld);
1623 return true;
1624 }
1625
1626 bool
1627 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
1628 {
1629 Instruction *ldR = rec->insn;
1630 int32_t offR = rec->offset;
1631 int32_t offE = ldE->getSrc(0)->reg.data.offset;
1632 int dR, dE;
1633
1634 assert(offR <= offE);
1635 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
1636 offR += ldR->getDef(dR)->reg.size;
1637 if (offR != offE)
1638 return false;
1639
1640 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
1641 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
1642 return false;
1643 ldE->def(dE).replace(ldR->getDef(dR), false);
1644 }
1645
1646 delete_Instruction(prog, ldE);
1647 return true;
1648 }
1649
1650 bool
1651 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
1652 {
1653 const Instruction *const ri = rec->insn;
1654 Value *extra[3];
1655
1656 int32_t offS = st->getSrc(0)->reg.data.offset;
1657 int32_t offR = rec->offset;
1658 int32_t endS = offS + typeSizeof(st->dType);
1659 int32_t endR = offR + typeSizeof(ri->dType);
1660
1661 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
1662
1663 st->takeExtraSources(0, extra);
1664
1665 if (offR < offS) {
1666 Value *vals[10];
1667 int s, n;
1668 int k = 0;
1669 // get non-replaced sources of ri
1670 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
1671 vals[k++] = ri->getSrc(s);
1672 n = s;
1673 // get replaced sources of st
1674 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
1675 vals[k++] = st->getSrc(s);
1676 // skip replaced sources of ri
1677 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
1678 // get non-replaced sources after values covered by st
1679 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
1680 vals[k++] = ri->getSrc(s);
1681 assert((unsigned int)k <= Elements(vals));
1682 for (s = 0; s < k; ++s)
1683 st->setSrc(s + 1, vals[s]);
1684 st->setSrc(0, ri->getSrc(0));
1685 } else
1686 if (endR > endS) {
1687 int j, s;
1688 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
1689 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
1690 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
1691 st->setSrc(s++, ri->getSrc(j));
1692 }
1693 st->putExtraSources(0, extra);
1694
1695 delete_Instruction(prog, rec->insn);
1696
1697 rec->insn = st;
1698 rec->offset = st->getSrc(0)->reg.data.offset;
1699
1700 st->setType(typeOfSize(rec->size));
1701
1702 return true;
1703 }
1704
1705 bool
1706 MemoryOpt::Record::overlaps(const Instruction *ldst) const
1707 {
1708 Record that;
1709 that.set(ldst);
1710
1711 if (this->fileIndex != that.fileIndex)
1712 return false;
1713
1714 if (this->rel[0] || that.rel[0])
1715 return this->base == that.base;
1716 return
1717 (this->offset < that.offset + that.size) &&
1718 (this->offset + this->size > that.offset);
1719 }
1720
1721 // We must not eliminate stores that affect the result of @ld if
1722 // we find later stores to the same location, and we may no longer
1723 // merge them with later stores.
1724 // The stored value can, however, still be used to determine the value
1725 // returned by future loads.
1726 void
1727 MemoryOpt::lockStores(Instruction *const ld)
1728 {
1729 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
1730 if (!r->locked && r->overlaps(ld))
1731 r->locked = true;
1732 }
1733
1734 // Prior loads from the location of @st are no longer valid.
1735 // Stores to the location of @st may no longer be used to derive
1736 // the value at it nor be coalesced into later stores.
1737 void
1738 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
1739 {
1740 if (st)
1741 f = st->src(0).getFile();
1742
1743 for (Record *r = loads[f]; r; r = r->next)
1744 if (!st || r->overlaps(st))
1745 r->unlink(&loads[f]);
1746
1747 for (Record *r = stores[f]; r; r = r->next)
1748 if (!st || r->overlaps(st))
1749 r->unlink(&stores[f]);
1750 }
1751
1752 bool
1753 MemoryOpt::visit(BasicBlock *bb)
1754 {
1755 bool ret = runOpt(bb);
1756 // Run again, one pass won't combine 4 32 bit ld/st to a single 128 bit ld/st
1757 // where 96 bit memory operations are forbidden.
1758 if (ret)
1759 ret = runOpt(bb);
1760 return ret;
1761 }
1762
1763 bool
1764 MemoryOpt::runOpt(BasicBlock *bb)
1765 {
1766 Instruction *ldst, *next;
1767 Record *rec;
1768 bool isAdjacent = true;
1769
1770 for (ldst = bb->getEntry(); ldst; ldst = next) {
1771 bool keep = true;
1772 bool isLoad = true;
1773 next = ldst->next;
1774
1775 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
1776 if (ldst->isDead()) {
1777 // might have been produced by earlier optimization
1778 delete_Instruction(prog, ldst);
1779 continue;
1780 }
1781 } else
1782 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
1783 isLoad = false;
1784 } else {
1785 // TODO: maybe have all fixed ops act as barrier ?
1786 if (ldst->op == OP_CALL ||
1787 ldst->op == OP_BAR ||
1788 ldst->op == OP_MEMBAR) {
1789 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1790 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1791 purgeRecords(NULL, FILE_MEMORY_SHARED);
1792 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1793 } else
1794 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
1795 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
1796 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1797 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1798 purgeRecords(NULL, FILE_MEMORY_SHARED);
1799 } else {
1800 purgeRecords(NULL, ldst->src(0).getFile());
1801 }
1802 } else
1803 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
1804 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1805 }
1806 continue;
1807 }
1808 if (ldst->getPredicate()) // TODO: handle predicated ld/st
1809 continue;
1810
1811 if (isLoad) {
1812 DataFile file = ldst->src(0).getFile();
1813
1814 // if ld l[]/g[] look for previous store to eliminate the reload
1815 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
1816 // TODO: shared memory ?
1817 rec = findRecord(ldst, false, isAdjacent);
1818 if (rec && !isAdjacent)
1819 keep = !replaceLdFromSt(ldst, rec);
1820 }
1821
1822 // or look for ld from the same location and replace this one
1823 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
1824 if (rec) {
1825 if (!isAdjacent)
1826 keep = !replaceLdFromLd(ldst, rec);
1827 else
1828 // or combine a previous load with this one
1829 keep = !combineLd(rec, ldst);
1830 }
1831 if (keep)
1832 lockStores(ldst);
1833 } else {
1834 rec = findRecord(ldst, false, isAdjacent);
1835 if (rec) {
1836 if (!isAdjacent)
1837 keep = !replaceStFromSt(ldst, rec);
1838 else
1839 keep = !combineSt(rec, ldst);
1840 }
1841 if (keep)
1842 purgeRecords(ldst, DATA_FILE_COUNT);
1843 }
1844 if (keep)
1845 addRecord(ldst);
1846 }
1847 reset();
1848
1849 return true;
1850 }
1851
1852 // =============================================================================
1853
1854 // Turn control flow into predicated instructions (after register allocation !).
1855 // TODO:
1856 // Could move this to before register allocation on NVC0 and also handle nested
1857 // constructs.
1858 class FlatteningPass : public Pass
1859 {
1860 private:
1861 virtual bool visit(BasicBlock *);
1862
1863 bool tryPredicateConditional(BasicBlock *);
1864 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
1865 void tryPropagateBranch(BasicBlock *);
1866 inline bool isConstantCondition(Value *pred);
1867 inline bool mayPredicate(const Instruction *, const Value *pred) const;
1868 inline void removeFlow(Instruction *);
1869 };
1870
1871 bool
1872 FlatteningPass::isConstantCondition(Value *pred)
1873 {
1874 Instruction *insn = pred->getUniqueInsn();
1875 assert(insn);
1876 if (insn->op != OP_SET || insn->srcExists(2))
1877 return false;
1878
1879 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
1880 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
1881 DataFile file;
1882 if (ld) {
1883 if (ld->op != OP_MOV && ld->op != OP_LOAD)
1884 return false;
1885 if (ld->src(0).isIndirect(0))
1886 return false;
1887 file = ld->src(0).getFile();
1888 } else {
1889 file = insn->src(s).getFile();
1890 // catch $r63 on NVC0
1891 if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
1892 file = FILE_IMMEDIATE;
1893 }
1894 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
1895 return false;
1896 }
1897 return true;
1898 }
1899
1900 void
1901 FlatteningPass::removeFlow(Instruction *insn)
1902 {
1903 FlowInstruction *term = insn ? insn->asFlow() : NULL;
1904 if (!term)
1905 return;
1906 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
1907
1908 if (term->op == OP_BRA) {
1909 // TODO: this might get more difficult when we get arbitrary BRAs
1910 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
1911 return;
1912 } else
1913 if (term->op != OP_JOIN)
1914 return;
1915
1916 Value *pred = term->getPredicate();
1917
1918 delete_Instruction(prog, term);
1919
1920 if (pred && pred->refCount() == 0) {
1921 Instruction *pSet = pred->getUniqueInsn();
1922 pred->join->reg.data.id = -1; // deallocate
1923 if (pSet->isDead())
1924 delete_Instruction(prog, pSet);
1925 }
1926 }
1927
1928 void
1929 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
1930 {
1931 for (Instruction *i = bb->getEntry(); i; i = i->next) {
1932 if (i->isNop())
1933 continue;
1934 assert(!i->getPredicate());
1935 i->setPredicate(cc, pred);
1936 }
1937 removeFlow(bb->getExit());
1938 }
1939
1940 bool
1941 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
1942 {
1943 if (insn->isPseudo())
1944 return true;
1945 // TODO: calls where we don't know which registers are modified
1946
1947 if (!prog->getTarget()->mayPredicate(insn, pred))
1948 return false;
1949 for (int d = 0; insn->defExists(d); ++d)
1950 if (insn->getDef(d)->equals(pred))
1951 return false;
1952 return true;
1953 }
1954
1955 // If we jump to BRA/RET/EXIT, replace the jump with it.
1956 // NOTE: We do not update the CFG anymore here !
1957 //
1958 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
1959 // BB:0
1960 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
1961 // BB1:
1962 // bra BB:3
1963 // BB2:
1964 // ...
1965 // BB3:
1966 // ...
1967 void
1968 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
1969 {
1970 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
1971 BasicBlock *bf = i->asFlow()->target.bb;
1972
1973 if (bf->getInsnCount() != 1)
1974 continue;
1975
1976 FlowInstruction *bra = i->asFlow();
1977 FlowInstruction *rep = bf->getExit()->asFlow();
1978
1979 if (!rep || rep->getPredicate())
1980 continue;
1981 if (rep->op != OP_BRA &&
1982 rep->op != OP_JOIN &&
1983 rep->op != OP_EXIT)
1984 continue;
1985
1986 // TODO: If there are multiple branches to @rep, only the first would
1987 // be replaced, so only remove them after this pass is done ?
1988 // Also, need to check all incident blocks for fall-through exits and
1989 // add the branch there.
1990 bra->op = rep->op;
1991 bra->target.bb = rep->target.bb;
1992 if (bf->cfg.incidentCount() == 1)
1993 bf->remove(rep);
1994 }
1995 }
1996
1997 bool
1998 FlatteningPass::visit(BasicBlock *bb)
1999 {
2000 if (tryPredicateConditional(bb))
2001 return true;
2002
2003 // try to attach join to previous instruction
2004 Instruction *insn = bb->getExit();
2005 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
2006 insn = insn->prev;
2007 if (insn && !insn->getPredicate() &&
2008 !insn->asFlow() &&
2009 insn->op != OP_TEXBAR &&
2010 !isTextureOp(insn->op) && // probably just nve4
2011 !isSurfaceOp(insn->op) && // not confirmed
2012 insn->op != OP_LINTERP && // probably just nve4
2013 insn->op != OP_PINTERP && // probably just nve4
2014 ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
2015 typeSizeof(insn->dType) <= 4) &&
2016 !insn->isNop()) {
2017 insn->join = 1;
2018 bb->remove(bb->getExit());
2019 return true;
2020 }
2021 }
2022
2023 tryPropagateBranch(bb);
2024
2025 return true;
2026 }
2027
bool
FlatteningPass::tryPredicateConditional(BasicBlock *bb)
{
   BasicBlock *bL = NULL, *bR = NULL;
   unsigned int nL = 0, nR = 0, limit = 12;
   Instruction *insn;
   unsigned int mask;

   mask = bb->initiatesSimpleConditional();
   if (!mask)
      return false;

   assert(bb->getExit());
   Value *pred = bb->getExit()->getPredicate();
   assert(pred);

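   // a branch on a constant condition is statically known and thus cheap;
   // presumably that is why predication is only considered worthwhile here
   // for very short blocks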
   if (isConstantCondition(pred))
      limit = 4;

   Graph::EdgeIterator ei = bb->cfg.outgoing();

   if (mask & 1) {
      bL = BasicBlock::get(ei.getNode());
      for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
         if (!mayPredicate(insn, pred))
            return false;
      if (nL > limit)
         return false; // too long, do a real branch
   }
   ei.next();

   if (mask & 2) {
      bR = BasicBlock::get(ei.getNode());
      for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
         if (!mayPredicate(insn, pred))
            return false;
      if (nR > limit)
         return false; // too long, do a real branch
   }

   if (bL)
      predicateInstructions(bL, pred, bb->getExit()->cc);
   if (bR)
      predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));

   if (bb->joinAt) {
      bb->remove(bb->joinAt);
      bb->joinAt = NULL;
   }
   removeFlow(bb->getExit()); // delete the branch/join at the fork point

   // remove potential join operations at the end of the conditional
   if (prog->getTarget()->joinAnterior) {
      bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
      if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
         removeFlow(bb->getEntry());
   }

   return true;
}

// =============================================================================

// Common subexpression elimination. Straightforward O(n^2) implementation.
class LocalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   inline bool tryReplace(Instruction **, Instruction *);

   DLList ops[OP_LAST + 1];
};

class GlobalCSE : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

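// Two instructions are "action equal" if they perform the same operation
// with the same modifiers; their operands are compared separately (see
// isResultEqual below).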
bool
Instruction::isActionEqual(const Instruction *that) const
{
   if (this->op != that->op ||
       this->dType != that->dType ||
       this->sType != that->sType)
      return false;
   if (this->cc != that->cc)
      return false;

   if (this->asTex()) {
      if (memcmp(&this->asTex()->tex,
                 &that->asTex()->tex,
                 sizeof(this->asTex()->tex)))
         return false;
   } else
   if (this->asCmp()) {
      if (this->asCmp()->setCond != that->asCmp()->setCond)
         return false;
   } else
   if (this->asFlow()) {
      return false;
   } else {
      if (this->ipa != that->ipa ||
          this->lanes != that->lanes ||
          this->perPatch != that->perPatch)
         return false;
      if (this->postFactor != that->postFactor)
         return false;
   }

   if (this->subOp != that->subOp ||
       this->saturate != that->saturate ||
       this->rnd != that->rnd ||
       this->ftz != that->ftz ||
       this->dnz != that->dnz ||
       this->cache != that->cache ||
       this->mask != that->mask)
      return false;

   return true;
}

bool
Instruction::isResultEqual(const Instruction *that) const
{
   unsigned int d, s;

   // NOTE: location of discard only affects tex with liveOnly and quadops
   if (!this->defExists(0) && this->op != OP_DISCARD)
      return false;

   if (!isActionEqual(that))
      return false;

   if (this->predSrc != that->predSrc)
      return false;

   for (d = 0; this->defExists(d); ++d) {
      if (!that->defExists(d) ||
          !this->getDef(d)->equals(that->getDef(d), false))
         return false;
   }
   if (that->defExists(d))
      return false;

   for (s = 0; this->srcExists(s); ++s) {
      if (!that->srcExists(s))
         return false;
      if (this->src(s).mod != that->src(s).mod)
         return false;
      if (!this->getSrc(s)->equals(that->getSrc(s), true))
         return false;
   }
   if (that->srcExists(s))
      return false;

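   // Loads only produce equal results when they read from a file that is
   // read-only for the duration of the shader; anything else might have
   // been clobbered by a store between the two instructions.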
   if (op == OP_LOAD || op == OP_VFETCH) {
      switch (src(0).getFile()) {
      case FILE_MEMORY_CONST:
      case FILE_SHADER_INPUT:
         return true;
      default:
         return false;
      }
   }

   return true;
}

// pull through common expressions from the different incoming blocks
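//
// A minimal sketch on hypothetical IR (assuming $r0 and $r1 have no other
// uses):
//   BB:1: $r0 = add a, b        BB:2: $r1 = add a, b
//   BB:3: $r2 = phi $r0, $r1
// becomes
//   BB:3: $r2 = add a, b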
bool
GlobalCSE::visit(BasicBlock *bb)
{
   Instruction *phi, *next, *ik;
   int s;

   // TODO: maybe do this with OP_UNION, too

   for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
      next = phi->next;
      if (phi->getSrc(0)->refCount() > 1)
         continue;
      ik = phi->getSrc(0)->getInsn();
      if (!ik)
         continue; // probably a function input
      for (s = 1; phi->srcExists(s); ++s) {
         if (phi->getSrc(s)->refCount() > 1)
            break;
         if (!phi->getSrc(s)->getInsn() ||
             !phi->getSrc(s)->getInsn()->isResultEqual(ik))
            break;
      }
      if (!phi->srcExists(s)) {
         Instruction *entry = bb->getEntry();
         ik->bb->remove(ik);
         if (!entry || entry->op != OP_JOIN)
            bb->insertHead(ik);
         else
            bb->insertAfter(entry, ik);
         ik->setDef(0, phi->getDef(0));
         delete_Instruction(prog, phi);
      }
   }

   return true;
}

bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())
      return false;

   if (!old->isResultEqual(i))
      return false;

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getEntry(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

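         // Pick the source value with the fewest uses: its (short) use list
         // then serves as a cheap set of candidate instructions that might
         // compute the same result as @ir.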
         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}

// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
{
public:
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *next;

   for (Instruction *i = bb->getFirst(); i; i = next) {
      next = i->next;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
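         // the memory side effect of these ops has to stay, but the unused
         // result can be dropped (presumably allowing the cheaper no-return
         // form of the atomic/reduction to be emitted later)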
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB)
            i->setDef(0, NULL);
      }
   }
   return true;
}

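// If only some components of a vector load are live, narrow the load and,
// when the live components are not contiguous, split it in two (at most).
// Illustrative example with hypothetical offsets, y and w live, x and z dead:
//   a 16-byte load of { x y z w } from c0[0x10]
// becomes
//   a 4-byte load of y from c0[0x14]
//   a 4-byte load of w from c0[0x1c]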
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      }
   }

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                   \
   if (level >= (l)) {                      \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
         INFO("PEEPHOLE: %s\n", #n);        \
      n pass;                               \
      if (!pass.f(this))                    \
         return false;                      \
   }
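
// For reference, RUN_PASS(1, DeadCodeElim, buryAll) expands to roughly:
//   if (level >= (1)) {
//      DeadCodeElim pass;
//      if (!pass.buryAll(this))
//         return false;
//   }
// (verbose debug output elided)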

bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   return true;
}

} // namespace nv50_ir