nv50/ir: Fold IMM into MAD
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmd32Load(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmd32Load(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) || (typeSizeof(ld->dType) != 4))
      return false;
   return ld->src(0).getFile() == FILE_IMMEDIATE;
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

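// Prefer a source order the encoders can exploit: constant-buffer and 32-bit
// immediate loads go into src1, where they can usually be folded directly
// into the instruction, while attribute/shared-memory loads go into src0.
// Only commutative ops and SET/SLCT may swap; for the latter two the
// condition code is adjusted below to keep the result unchanged.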
void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   if (!prog->getTarget()->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   if (isCSpaceLoad(i0)) {
      if (!isCSpaceLoad(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isImmd32Load(i0)) {
      if (!isCSpaceLoad(i1) && !isImmd32Load(i1))
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   // TGSI 'true' is converted to -1 by F2I(NEG(SET)), track back to SET
   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();

   while (insn && insn->op != OP_SET) {
      Instruction *next = NULL;
      switch (insn->op) {
      case OP_NEG:
      case OP_ABS:
      case OP_CVT:
         next = insn->getSrc(0)->getInsn();
         if (!next || insn->sType != next->dType)
            return NULL;
         break;
      case OP_MOV:
         next = insn->getSrc(0)->getInsn();
         break;
      default:
         return NULL;
      }
      insn = next;
   }
   return insn ? insn->asCmp() : NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 * b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
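   // EXTBF: extract the bitfield of `width` bits starting at bit `offset`
   // (both packed in src1 as offset | width << 8). The field is shifted up
   // to the MSB and back down so that TYPE_S32 gets sign extension for free
   // from the arithmetic right shift.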
   case OP_EXTBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
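   // 2-source POPCNT counts the set bits of (src0 & src1).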
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;

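   // For MAD/FMA only srcs 0 and 1 were folded; turn the instruction into
   // an ADD of src2 and the folded product. If src2 happens to be an
   // immediate as well, recurse to fold the ADD too.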
   if (i->op == OP_MAD || i->op == OP_FMA) {
      i->op = OP_ADD;

      i->setSrc(1, i->getSrc(0));
      i->src(1).mod = i->src(2).mod;
      i->setSrc(0, i->getSrc(2));
      i->setSrc(2, NULL);

      ImmediateValue src0;
      if (i->src(0).getImmediate(src0))
         expr(i, src0, *i->getSrc(1)->asImm());
      if (i->saturate && !prog->getTarget()->isSatSupported(i)) {
         bld.setPosition(i, false);
         i->setSrc(1, bld.loadImm(NULL, res.data.u32));
      }
   } else {
      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
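      // INSBF: insert the low `width` bits of src0 into src2 at bit
      // `offset` (offset and width packed in src1 as offset | width << 8).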
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32;
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
         }
         mul1->saturate = mul2->saturate;
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
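            // mul_high(x, 2^k) keeps only the top k bits of x, i.e. it is
            // exactly x >> (32 - k).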
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else
      if (imm0.isInteger(1) || imm0.isInteger(-1)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (imm0.isInteger(2) || imm0.isInteger(-2)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      }
      break;
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (imm0.isInteger(1) || imm0.isInteger(-1)) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      }
      break;
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
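         // Replace the division with a multiply by a rounded-up 32-bit
         // reciprocal (the classic Granlund/Montgomery magic-number method):
         // with l = ceil(log2(d)) and m = floor(2^32 * (2^l - d) / d) + 1,
         // the quotient is recovered from the high half of x*m plus a couple
         // of shift/add fixups.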
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
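         // Signed variant of the magic-number division: multiply-high by
         // m = 2^(32+l-1) / |d| + 1 - 2^32 (the MAD adds x back in to
         // compensate for m being negative), shift, correct the quotient by
         // the sign of x, and negate the result if d < 0.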
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
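      // A comparison of a SET result against zero can be folded into the
      // original SET by adjusting its condition code, e.g.
      // ((a < b) != 0) -> (a < b), ((a < b) == 0) -> (a >= b).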
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (i->src(t).mod != Modifier(0))
         return;
      if (imm0.reg.data.u32 != 0 || !si || si->op != OP_SET)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break;
      case CC_GE: cc = CC_TR; break;
      case CC_EQ: cc = inverseCondCode(cc); break;
      case CC_LE: cc = inverseCondCode(cc); break;
      case CC_GT: break;
      case CC_NE: break;
      default:
         return;
      }
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      i->sType = si->sType;
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SHL)
         break;
      ImmediateValue imm1;
      if (si->src(1).getImmediate(imm1)) {
         bld.setPosition(i, false);
         i->setSrc(0, si->getSrc(0));
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   default:
      return;
   }
   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT(Instruction *);
   void handleSUCLAMP(Instruction *);

   BuildUtil bld;
};

void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->op = OP_SAD;
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
}

bool
AlgebraicOpt::handleADD(Instruction *add)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return false;

   bool changed = false;
   if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_MAD);
   if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_SAD);
   return changed;
}

// ADD(SAD(a,b,0), c) -> SAD(a,b,c)
// ADD(MUL(a,b), c) -> MAD(a,b,c)
bool
AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);
   Value *src;
   int s;
   const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
   const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
   Modifier mod[4];

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
      s = 0;
   else
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
      s = 1;
   else
      return false;

   if ((src0->getUniqueInsn() && src0->getUniqueInsn()->bb != add->bb) ||
       (src1->getUniqueInsn() && src1->getUniqueInsn()->bb != add->bb))
      return false;

   src = add->getSrc(s);

   if (src->getInsn()->postFactor)
      return false;
   if (toOp == OP_SAD) {
      ImmediateValue imm;
      if (!src->getInsn()->src(2).getImmediate(imm))
         return false;
      if (!imm.isInteger(0))
         return false;
   }

   mod[0] = add->src(0).mod;
   mod[1] = add->src(1).mod;
   mod[2] = src->getUniqueInsn()->src(0).mod;
   mod[3] = src->getUniqueInsn()->src(1).mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
      return false;

   add->op = toOp;
   add->subOp = src->getInsn()->subOp; // potentially mul-high

   add->setSrc(2, add->src(s ? 0 : 1));

   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src(0).mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src(1).mod = mod[3];

   return true;
}

void
AlgebraicOpt::handleMINMAX(Instruction *minmax)
{
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
      return;
   if (minmax->src(0).mod == minmax->src(1).mod) {
      if (minmax->def(0).mayReplace(minmax->src(0))) {
         minmax->def(0).replace(minmax->src(0), false);
         minmax->bb->remove(minmax);
      } else {
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);
      }
   } else {
      // TODO:
      // min(x, -x) = -abs(x)
      // min(x, -abs(x)) = -abs(x)
      // min(x, abs(x)) = x
      // max(x, -abs(x)) = x
      // max(x, abs(x)) = abs(x)
      // max(x, -x) = abs(x)
   }
}

void
AlgebraicOpt::handleRCP(Instruction *rcp)
{
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src(0).mod * si->src(0).mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
   }
}

void
AlgebraicOpt::handleSLCT(Instruction *slct)
{
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   } else
   if (slct->getSrc(0) != slct->getSrc(1)) {
      return;
   }
   slct->op = OP_MOV;
   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
}

void
AlgebraicOpt::handleLOGOP(Instruction *logop)
{
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0 == src1) {
      if ((logop->op == OP_AND || logop->op == OP_OR) &&
          logop->def(0).mayReplace(logop->src(0))) {
         logop->def(0).replace(logop->src(0), false);
         delete_Instruction(prog, logop);
      }
   } else {
      // try AND(SET, SET) -> SET_AND(SET)
      Instruction *set0 = src0->getInsn();
      Instruction *set1 = src1->getInsn();

      if (!set0 || set0->fixed || !set1 || set1->fixed)
         return;
      if (set1->op != OP_SET) {
         Instruction *xchg = set0;
         set0 = set1;
         set1 = xchg;
         if (set1->op != OP_SET)
            return;
      }
      operation redOp = (logop->op == OP_AND ? OP_SET_AND :
                         logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
      if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
         return;
      if (set0->op != OP_SET &&
          set0->op != OP_SET_AND &&
          set0->op != OP_SET_OR &&
          set0->op != OP_SET_XOR)
         return;
      if (set0->getDef(0)->refCount() > 1 &&
          set1->getDef(0)->refCount() > 1)
         return;
      if (set0->getPredicate() || set1->getPredicate())
         return;
      // check that they don't source each other
      for (int s = 0; s < 2; ++s)
         if (set0->getSrc(s) == set1->getDef(0) ||
             set1->getSrc(s) == set0->getDef(0))
            return;

      set0 = cloneForward(func, set0);
      set1 = cloneShallow(func, set1);
      logop->bb->insertAfter(logop, set1);
      logop->bb->insertAfter(logop, set0);

      set0->dType = TYPE_U8;
      set0->getDef(0)->reg.file = FILE_PREDICATE;
      set0->getDef(0)->reg.size = 1;
      set1->setSrc(2, set0->getDef(0));
      set1->op = redOp;
      set1->setDef(0, logop->getDef(0));
      delete_Instruction(prog, logop);
   }
}

// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
// nv50:
// F2I(NEG(I2F(ABS(SET))))
void
AlgebraicOpt::handleCVT(Instruction *cvt)
{
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
      return;
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
      return;
   if (insn->src(0).mod != Modifier(0))
      return;
   insn = insn->getSrc(0)->getInsn();

   // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
   if (insn && insn->op == OP_CVT &&
       insn->dType == TYPE_F32 &&
       insn->sType == TYPE_S32) {
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
          insn->src(0).mod)
         return;
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
         return;
   } else
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
      return;
   }

   Instruction *bset = cloneShallow(func, insn);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
}

// SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
void
AlgebraicOpt::handleSUCLAMP(Instruction *insn)
{
   ImmediateValue imm;
   int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
   int s;
   Instruction *add;

   assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);

   // look for ADD (TODO: only count references by non-SUCLAMP)
   if (insn->getSrc(0)->refCount() > 1)
      return;
   add = insn->getSrc(0)->getInsn();
   if (!add || add->op != OP_ADD ||
       (add->dType != TYPE_U32 &&
        add->dType != TYPE_S32))
      return;

   // look for immediate
   for (s = 0; s < 2; ++s)
      if (add->src(s).getImmediate(imm))
         break;
   if (s >= 2)
      return;
   s = s ? 0 : 1;
   // determine if immediate fits
   val += imm.reg.data.s32;
   if (val > 31 || val < -32)
      return;
   // determine if other addend fits
   if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
      return;

   bld.setPosition(insn, false); // make sure bld is init'ed
   // replace sources
   insn->setSrc(2, bld.mkImm(val));
   insn->setSrc(0, add->getSrc(s));
}

bool
AlgebraicOpt::visit(BasicBlock *bb)
{
   Instruction *next;
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;
      switch (i->op) {
      case OP_ABS:
         handleABS(i);
         break;
      case OP_ADD:
         handleADD(i);
         break;
      case OP_RCP:
         handleRCP(i);
         break;
      case OP_MIN:
      case OP_MAX:
         handleMINMAX(i);
         break;
      case OP_SLCT:
         handleSLCT(i);
         break;
      case OP_AND:
      case OP_OR:
      case OP_XOR:
         handleLOGOP(i);
         break;
      case OP_CVT:
         handleCVT(i);
         break;
      case OP_SUCLAMP:
         handleSUCLAMP(i);
         break;
      default:
         break;
      }
   }

   return true;
}

// =============================================================================

static inline void
updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
{
   if (offset != ldst->getSrc(0)->reg.data.offset) {
      if (ldst->getSrc(0)->refCount() > 1)
         ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
      ldst->getSrc(0)->reg.data.offset = offset;
   }
}

// Combine loads and stores, forward stores to loads where possible.
class MemoryOpt : public Pass
{
private:
   class Record
   {
   public:
      Record *next;
      Instruction *insn;
      const Value *rel[2];
      const Value *base;
      int32_t offset;
      int8_t fileIndex;
      uint8_t size;
      bool locked;
      Record *prev;

      bool overlaps(const Instruction *ldst) const;

      inline void link(Record **);
      inline void unlink(Record **);
      inline void set(const Instruction *ldst);
   };

public:
   MemoryOpt();

   Record *loads[DATA_FILE_COUNT];
   Record *stores[DATA_FILE_COUNT];

   MemoryPool recordPool;

private:
   virtual bool visit(BasicBlock *);
   bool runOpt(BasicBlock *);

   Record **getList(const Instruction *);

   Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;

   // merge @insn into load/store instruction from @rec
   bool combineLd(Record *rec, Instruction *ld);
   bool combineSt(Record *rec, Instruction *st);

   bool replaceLdFromLd(Instruction *ld, Record *ldRec);
   bool replaceLdFromSt(Instruction *ld, Record *stRec);
   bool replaceStFromSt(Instruction *restrict st, Record *stRec);

   void addRecord(Instruction *ldst);
   void purgeRecords(Instruction *const st, DataFile);
   void lockStores(Instruction *const ld);
   void reset();

private:
   Record *prevRecord;
};

MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
{
   for (int i = 0; i < DATA_FILE_COUNT; ++i) {
      loads[i] = NULL;
      stores[i] = NULL;
   }
   prevRecord = NULL;
}

void
MemoryOpt::reset()
{
   for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
      Record *it, *next;
      for (it = loads[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      loads[i] = NULL;
      for (it = stores[i]; it; it = next) {
         next = it->next;
         recordPool.release(it);
      }
      stores[i] = NULL;
   }
}

bool
MemoryOpt::combineLd(Record *rec, Instruction *ld)
{
   int32_t offRc = rec->offset;
   int32_t offLd = ld->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeLd = typeSizeof(ld->dType);
   int size = sizeRc + sizeLd;
   int d, j;

   if (!prog->getTarget()->
       isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   // no unaligned loads
   if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
       ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
      return false;

   assert(sizeRc + sizeLd <= 16 && offRc != offLd);

   for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);

   if (offLd < offRc) {
      int sz;
      for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
      // d: nr of definitions in ld
      // j: nr of definitions in rec->insn, move:
      for (d = d + j - 1; j > 0; --j, --d)
         rec->insn->setDef(d, rec->insn->getDef(j - 1));

      if (rec->insn->getSrc(0)->refCount() > 1)
         rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
      rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;

      d = 0;
   } else {
      d = j;
   }
   // move definitions of @ld to @rec->insn
   for (j = 0; sizeLd; ++j, ++d) {
      sizeLd -= ld->getDef(j)->reg.size;
      rec->insn->setDef(d, ld->getDef(j));
   }

   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));

   delete_Instruction(prog, ld);

   return true;
}

bool
MemoryOpt::combineSt(Record *rec, Instruction *st)
{
   int32_t offRc = rec->offset;
   int32_t offSt = st->getSrc(0)->reg.data.offset;
   int sizeRc = rec->size;
   int sizeSt = typeSizeof(st->dType);
   int s = sizeSt / 4;
   int size = sizeRc + sizeSt;
   int j, k;
   Value *src[4]; // no modifiers in ValueRef allowed for st
   Value *extra[3];

   if (!prog->getTarget()->
       isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
      return false;
   if (size == 8 && MIN2(offRc, offSt) & 0x7)
      return false;

   st->takeExtraSources(0, extra); // save predicate and indirect address

   if (offRc < offSt) {
      // save values from @st
      for (s = 0; sizeSt; ++s) {
         sizeSt -= st->getSrc(s + 1)->reg.size;
         src[s] = st->getSrc(s + 1);
      }
      // set record's values as low sources of @st
      for (j = 1; sizeRc; ++j) {
         sizeRc -= rec->insn->getSrc(j)->reg.size;
         st->setSrc(j, rec->insn->getSrc(j));
      }
      // set saved values as high sources of @st
      for (k = j, j = 0; j < s; ++j)
         st->setSrc(k++, src[j]);

      updateLdStOffset(st, offRc, func);
   } else {
      for (j = 1; sizeSt; ++j)
         sizeSt -= st->getSrc(j)->reg.size;
      for (s = 1; sizeRc; ++j, ++s) {
         sizeRc -= rec->insn->getSrc(s)->reg.size;
         st->setSrc(j, rec->insn->getSrc(s));
      }
      rec->offset = offSt;
   }
   st->putExtraSources(0, extra); // restore pointer and predicate

   delete_Instruction(prog, rec->insn);
   rec->insn = st;
   rec->size = size;
   rec->insn->getSrc(0)->reg.size = size;
   rec->insn->setType(typeOfSize(size));
   return true;
}

void
MemoryOpt::Record::set(const Instruction *ldst)
{
   const Symbol *mem = ldst->getSrc(0)->asSym();
   fileIndex = mem->reg.fileIndex;
   rel[0] = ldst->getIndirect(0, 0);
   rel[1] = ldst->getIndirect(0, 1);
   offset = mem->reg.data.offset;
   base = mem->getBase();
   size = typeSizeof(ldst->sType);
}

void
MemoryOpt::Record::link(Record **list)
{
   next = *list;
   if (next)
      next->prev = this;
   prev = NULL;
   *list = this;
}

void
MemoryOpt::Record::unlink(Record **list)
{
   if (next)
      next->prev = prev;
   if (prev)
      prev->next = next;
   else
      *list = next;
}

MemoryOpt::Record **
MemoryOpt::getList(const Instruction *insn)
{
   if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
      return &loads[insn->src(0).getFile()];
   return &stores[insn->src(0).getFile()];
}

void
MemoryOpt::addRecord(Instruction *i)
{
   Record **list = getList(i);
   Record *it = reinterpret_cast<Record *>(recordPool.allocate());

   it->link(list);
   it->set(i);
   it->insn = i;
   it->locked = false;
}

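// Search the load or store record list for a previous access to the same
// 16-byte block (same file, file index and indirect addressing). Returns a
// record whose range covers or adjoins the new access; @isAdj tells the
// caller whether the two must be combined or whether one can simply replace
// the other.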
1733 MemoryOpt::Record *
1734 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
1735 {
1736 const Symbol *sym = insn->getSrc(0)->asSym();
1737 const int size = typeSizeof(insn->sType);
1738 Record *rec = NULL;
1739 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
1740
1741 for (; it; it = it->next) {
1742 if (it->locked && insn->op != OP_LOAD)
1743 continue;
1744 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
1745 it->rel[0] != insn->getIndirect(0, 0) ||
1746 it->fileIndex != sym->reg.fileIndex ||
1747 it->rel[1] != insn->getIndirect(0, 1))
1748 continue;
1749
1750 if (it->offset < sym->reg.data.offset) {
1751 if (it->offset + it->size >= sym->reg.data.offset) {
1752 isAdj = (it->offset + it->size == sym->reg.data.offset);
1753 if (!isAdj)
1754 return it;
1755 if (!(it->offset & 0x7))
1756 rec = it;
1757 }
1758 } else {
1759 isAdj = it->offset != sym->reg.data.offset;
1760 if (size <= it->size && !isAdj)
1761 return it;
1762 else
1763 if (!(sym->reg.data.offset & 0x7))
1764 if (it->offset - size <= sym->reg.data.offset)
1765 rec = it;
1766 }
1767 }
1768 return rec;
1769 }
1770
1771 bool
1772 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
1773 {
1774 Instruction *st = rec->insn;
1775 int32_t offSt = rec->offset;
1776 int32_t offLd = ld->getSrc(0)->reg.data.offset;
1777 int d, s;
1778
1779 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
1780 offSt += st->getSrc(s)->reg.size;
1781 if (offSt != offLd)
1782 return false;
1783
1784 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
1785 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
1786 return false;
1787 if (st->getSrc(s)->reg.file != FILE_GPR)
1788 return false;
1789 ld->def(d).replace(st->src(s), false);
1790 }
1791 ld->bb->remove(ld);
1792 return true;
1793 }
1794
1795 bool
1796 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
1797 {
1798 Instruction *ldR = rec->insn;
1799 int32_t offR = rec->offset;
1800 int32_t offE = ldE->getSrc(0)->reg.data.offset;
1801 int dR, dE;
1802
1803 assert(offR <= offE);
1804 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
1805 offR += ldR->getDef(dR)->reg.size;
1806 if (offR != offE)
1807 return false;
1808
1809 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
1810 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
1811 return false;
1812 ldE->def(dE).replace(ldR->getDef(dR), false);
1813 }
1814
1815 delete_Instruction(prog, ldE);
1816 return true;
1817 }
1818
1819 bool
1820 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
1821 {
1822 const Instruction *const ri = rec->insn;
1823 Value *extra[3];
1824
1825 int32_t offS = st->getSrc(0)->reg.data.offset;
1826 int32_t offR = rec->offset;
1827 int32_t endS = offS + typeSizeof(st->dType);
1828 int32_t endR = offR + typeSizeof(ri->dType);
1829
1830 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
1831
1832 st->takeExtraSources(0, extra);
1833
1834 if (offR < offS) {
1835 Value *vals[10];
1836 int s, n;
1837 int k = 0;
1838 // get non-replaced sources of ri
1839 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
1840 vals[k++] = ri->getSrc(s);
1841 n = s;
1842 // get replaced sources of st
1843 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
1844 vals[k++] = st->getSrc(s);
1845 // skip replaced sources of ri
1846 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
1847 // get non-replaced sources after values covered by st
1848 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
1849 vals[k++] = ri->getSrc(s);
1850 assert((unsigned int)k <= Elements(vals));
1851 for (s = 0; s < k; ++s)
1852 st->setSrc(s + 1, vals[s]);
1853 st->setSrc(0, ri->getSrc(0));
1854 } else
1855 if (endR > endS) {
1856 int j, s;
1857 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
1858 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
1859 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
1860 st->setSrc(s++, ri->getSrc(j));
1861 }
1862 st->putExtraSources(0, extra);
1863
1864 delete_Instruction(prog, rec->insn);
1865
1866 rec->insn = st;
1867 rec->offset = st->getSrc(0)->reg.data.offset;
1868
1869 st->setType(typeOfSize(rec->size));
1870
1871 return true;
1872 }
1873
1874 bool
1875 MemoryOpt::Record::overlaps(const Instruction *ldst) const
1876 {
1877 Record that;
1878 that.set(ldst);
1879
1880 if (this->fileIndex != that.fileIndex)
1881 return false;
1882
1883 if (this->rel[0] || that.rel[0])
1884 return this->base == that.base;
1885 return
1886 (this->offset < that.offset + that.size) &&
1887 (this->offset + this->size > that.offset);
1888 }
1889
1890 // We must not eliminate stores that affect the result of @ld if
1891 // we find later stores to the same location, and we may no longer
1892 // merge them with later stores.
1893 // The stored value can, however, still be used to determine the value
1894 // returned by future loads.
1895 void
1896 MemoryOpt::lockStores(Instruction *const ld)
1897 {
1898 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
1899 if (!r->locked && r->overlaps(ld))
1900 r->locked = true;
1901 }
1902
1903 // Prior loads from the location of @st are no longer valid.
1904 // Stores to the location of @st may no longer be used to derive
1905 // the value at it nor be coalesced into later stores.
1906 void
1907 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
1908 {
1909 if (st)
1910 f = st->src(0).getFile();
1911
1912 for (Record *r = loads[f]; r; r = r->next)
1913 if (!st || r->overlaps(st))
1914 r->unlink(&loads[f]);
1915
1916 for (Record *r = stores[f]; r; r = r->next)
1917 if (!st || r->overlaps(st))
1918 r->unlink(&stores[f]);
1919 }
1920
1921 bool
1922 MemoryOpt::visit(BasicBlock *bb)
1923 {
1924 bool ret = runOpt(bb);
1925 // Run again, one pass won't combine 4 32 bit ld/st to a single 128 bit ld/st
1926 // where 96 bit memory operations are forbidden.
1927 if (ret)
1928 ret = runOpt(bb);
1929 return ret;
1930 }
1931
1932 bool
1933 MemoryOpt::runOpt(BasicBlock *bb)
1934 {
1935 Instruction *ldst, *next;
1936 Record *rec;
1937 bool isAdjacent = true;
1938
1939 for (ldst = bb->getEntry(); ldst; ldst = next) {
1940 bool keep = true;
1941 bool isLoad = true;
1942 next = ldst->next;
1943
1944 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
1945 if (ldst->isDead()) {
1946 // might have been produced by earlier optimization
1947 delete_Instruction(prog, ldst);
1948 continue;
1949 }
1950 } else
1951 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
1952 isLoad = false;
1953 } else {
1954 // TODO: maybe have all fixed ops act as barrier ?
1955 if (ldst->op == OP_CALL ||
1956 ldst->op == OP_BAR ||
1957 ldst->op == OP_MEMBAR) {
1958 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1959 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1960 purgeRecords(NULL, FILE_MEMORY_SHARED);
1961 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1962 } else
1963 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
1964 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
1965 purgeRecords(NULL, FILE_MEMORY_LOCAL);
1966 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
1967 purgeRecords(NULL, FILE_MEMORY_SHARED);
1968 } else {
1969 purgeRecords(NULL, ldst->src(0).getFile());
1970 }
1971 } else
1972 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
1973 purgeRecords(NULL, FILE_SHADER_OUTPUT);
1974 }
1975 continue;
1976 }
1977 if (ldst->getPredicate()) // TODO: handle predicated ld/st
1978 continue;
1979
1980 if (isLoad) {
1981 DataFile file = ldst->src(0).getFile();
1982
1983 // if ld l[]/g[] look for previous store to eliminate the reload
1984 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
1985 // TODO: shared memory ?
1986 rec = findRecord(ldst, false, isAdjacent);
1987 if (rec && !isAdjacent)
1988 keep = !replaceLdFromSt(ldst, rec);
1989 }
1990
1991 // or look for ld from the same location and replace this one
1992 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
1993 if (rec) {
1994 if (!isAdjacent)
1995 keep = !replaceLdFromLd(ldst, rec);
1996 else
1997 // or combine a previous load with this one
1998 keep = !combineLd(rec, ldst);
1999 }
2000 if (keep)
2001 lockStores(ldst);
2002 } else {
2003 rec = findRecord(ldst, false, isAdjacent);
2004 if (rec) {
2005 if (!isAdjacent)
2006 keep = !replaceStFromSt(ldst, rec);
2007 else
2008 keep = !combineSt(rec, ldst);
2009 }
2010 if (keep)
2011 purgeRecords(ldst, DATA_FILE_COUNT);
2012 }
2013 if (keep)
2014 addRecord(ldst);
2015 }
2016 reset();
2017
2018 return true;
2019 }
2020
2021 // =============================================================================
2022
2023 // Turn control flow into predicated instructions (after register allocation !).
2024 // TODO:
2025 // Could move this to before register allocation on NVC0 and also handle nested
2026 // constructs.
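// Roughly, a small if/else such as
//   @p0 bra ELSE ; <then-insns> ; ELSE: <else-insns>
// becomes straight-line code under complementary predicates:
//   @!p0 <then-insns> ; @p0 <else-insns>
// (the actual polarity is derived from the branch's condition code below).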
2027 class FlatteningPass : public Pass
2028 {
2029 private:
2030 virtual bool visit(BasicBlock *);
2031
2032 bool tryPredicateConditional(BasicBlock *);
2033 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
2034 void tryPropagateBranch(BasicBlock *);
2035 inline bool isConstantCondition(Value *pred);
2036 inline bool mayPredicate(const Instruction *, const Value *pred) const;
2037 inline void removeFlow(Instruction *);
2038 };
2039
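// A condition is considered constant if it is a plain OP_SET whose operands
// come only from immediates or the constant buffer, e.g. (hypothetical):
//   set $p0 lt f32 c0[0x10] 0x3f800000
// Such predicates are uniform across threads, so a real branch stays well
// behaved and tryPredicateConditional applies a lower size limit to them.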
2040 bool
2041 FlatteningPass::isConstantCondition(Value *pred)
2042 {
2043 Instruction *insn = pred->getUniqueInsn();
2044 assert(insn);
2045 if (insn->op != OP_SET || insn->srcExists(2))
2046 return false;
2047
2048 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
2049 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
2050 DataFile file;
2051 if (ld) {
2052 if (ld->op != OP_MOV && ld->op != OP_LOAD)
2053 return false;
2054 if (ld->src(0).isIndirect(0))
2055 return false;
2056 file = ld->src(0).getFile();
2057 } else {
2058 file = insn->src(s).getFile();
2059 // catch $r63 on NVC0
2060 if (file == FILE_GPR && insn->getSrc(s)->reg.data.id > prog->maxGPR)
2061 file = FILE_IMMEDIATE;
2062 }
2063 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
2064 return false;
2065 }
2066 return true;
2067 }
2068
2069 void
2070 FlatteningPass::removeFlow(Instruction *insn)
2071 {
2072 FlowInstruction *term = insn ? insn->asFlow() : NULL;
2073 if (!term)
2074 return;
2075 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
2076
2077 if (term->op == OP_BRA) {
2078 // TODO: this might get more difficult when we get arbitrary BRAs
2079 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
2080 return;
2081 } else
2082 if (term->op != OP_JOIN)
2083 return;
2084
2085 Value *pred = term->getPredicate();
2086
2087 delete_Instruction(prog, term);
2088
2089 if (pred && pred->refCount() == 0) {
2090 Instruction *pSet = pred->getUniqueInsn();
2091 pred->join->reg.data.id = -1; // deallocate
2092 if (pSet->isDead())
2093 delete_Instruction(prog, pSet);
2094 }
2095 }
2096
2097 void
2098 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
2099 {
2100 for (Instruction *i = bb->getEntry(); i; i = i->next) {
2101 if (i->isNop())
2102 continue;
2103 assert(!i->getPredicate());
2104 i->setPredicate(cc, pred);
2105 }
2106 removeFlow(bb->getExit());
2107 }
2108
2109 bool
2110 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
2111 {
2112 if (insn->isPseudo())
2113 return true;
2114 // TODO: calls where we don't know which registers are modified
2115
2116 if (!prog->getTarget()->mayPredicate(insn, pred))
2117 return false;
2118 for (int d = 0; insn->defExists(d); ++d)
2119 if (insn->getDef(d)->equals(pred))
2120 return false;
2121 return true;
2122 }
2123
// If we jump to BRA/JOIN/EXIT, replace the jump with it.
2125 // NOTE: We do not update the CFG anymore here !
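// E.g. a "bra BB:4" whose target block consists solely of "exit" is itself
// rewritten into "exit" (hypothetical block numbers).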
2126 //
2127 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
//  BB:0
//   @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
//  BB:1
//   bra BB:3
//  BB:2
//  ...
//  BB:3
//  ...
2136 void
2137 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
2138 {
2139 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
2140 BasicBlock *bf = i->asFlow()->target.bb;
2141
2142 if (bf->getInsnCount() != 1)
2143 continue;
2144
2145 FlowInstruction *bra = i->asFlow();
2146 FlowInstruction *rep = bf->getExit()->asFlow();
2147
2148 if (!rep || rep->getPredicate())
2149 continue;
2150 if (rep->op != OP_BRA &&
2151 rep->op != OP_JOIN &&
2152 rep->op != OP_EXIT)
2153 continue;
2154
2155 // TODO: If there are multiple branches to @rep, only the first would
2156 // be replaced, so only remove them after this pass is done ?
2157 // Also, need to check all incident blocks for fall-through exits and
2158 // add the branch there.
2159 bra->op = rep->op;
2160 bra->target.bb = rep->target.bb;
2161 if (bf->cfg.incidentCount() == 1)
2162 bf->remove(rep);
2163 }
2164 }
2165
2166 bool
2167 FlatteningPass::visit(BasicBlock *bb)
2168 {
2169 if (tryPredicateConditional(bb))
2170 return true;
2171
2172 // try to attach join to previous instruction
2173 if (prog->getTarget()->hasJoin) {
2174 Instruction *insn = bb->getExit();
2175 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
2176 insn = insn->prev;
2177 if (insn && !insn->getPredicate() &&
2178 !insn->asFlow() &&
2179 insn->op != OP_TEXBAR &&
2180 !isTextureOp(insn->op) && // probably just nve4
2181 !isSurfaceOp(insn->op) && // not confirmed
2182 insn->op != OP_LINTERP && // probably just nve4
2183 insn->op != OP_PINTERP && // probably just nve4
2184 ((insn->op != OP_LOAD && insn->op != OP_STORE) ||
2185 typeSizeof(insn->dType) <= 4) &&
2186 !insn->isNop()) {
2187 insn->join = 1;
2188 bb->remove(bb->getExit());
2189 return true;
2190 }
2191 }
2192 }
2193
2194 tryPropagateBranch(bb);
2195
2196 return true;
2197 }
2198
2199 bool
2200 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
2201 {
2202 BasicBlock *bL = NULL, *bR = NULL;
2203 unsigned int nL = 0, nR = 0, limit = 12;
2204 Instruction *insn;
2205 unsigned int mask;
2206
2207 mask = bb->initiatesSimpleConditional();
2208 if (!mask)
2209 return false;
2210
2211 assert(bb->getExit());
2212 Value *pred = bb->getExit()->getPredicate();
2213 assert(pred);
2214
2215 if (isConstantCondition(pred))
2216 limit = 4;
2217
2218 Graph::EdgeIterator ei = bb->cfg.outgoing();
2219
2220 if (mask & 1) {
2221 bL = BasicBlock::get(ei.getNode());
2222 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
2223 if (!mayPredicate(insn, pred))
2224 return false;
2225 if (nL > limit)
2226 return false; // too long, do a real branch
2227 }
2228 ei.next();
2229
2230 if (mask & 2) {
2231 bR = BasicBlock::get(ei.getNode());
2232 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
2233 if (!mayPredicate(insn, pred))
2234 return false;
2235 if (nR > limit)
2236 return false; // too long, do a real branch
2237 }
2238
2239 if (bL)
2240 predicateInstructions(bL, pred, bb->getExit()->cc);
2241 if (bR)
2242 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
2243
2244 if (bb->joinAt) {
2245 bb->remove(bb->joinAt);
2246 bb->joinAt = NULL;
2247 }
2248 removeFlow(bb->getExit()); // delete the branch/join at the fork point
2249
2250 // remove potential join operations at the end of the conditional
2251 if (prog->getTarget()->joinAnterior) {
2252 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
2253 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
2254 removeFlow(bb->getEntry());
2255 }
2256
2257 return true;
2258 }
2259
2260 // =============================================================================
2261
// Fold immediates into MADs; this must be done after register allocation
// because of the constraint SDST == SSRC2.
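// E.g. (hypothetical registers):
//   mov $r1, 0x40000000
//   mad $r0, $r2, $r1, $r0  ->  mad $r0, $r2, 0x40000000, $r0
// with the now-dead MOV deleted right here, since nothing runs DCE post-RA.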
2264 // TODO:
2265 // Does NVC0+ have other situations where this pass makes sense?
2266 class NV50PostRaConstantFolding : public Pass
2267 {
2268 private:
2269 virtual bool visit(BasicBlock *);
2270 };
2271
2272 bool
2273 NV50PostRaConstantFolding::visit(BasicBlock *bb)
2274 {
2275 Value *vtmp;
2276 Instruction *def;
2277
2278 for (Instruction *i = bb->getFirst(); i; i = i->next) {
2279 switch (i->op) {
2280 case OP_MAD:
2281 if (i->def(0).getFile() != FILE_GPR ||
2282 i->src(0).getFile() != FILE_GPR ||
2283 i->src(1).getFile() != FILE_GPR ||
2284 i->src(2).getFile() != FILE_GPR ||
2285 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
2286 break;
2287
2288 def = i->getSrc(1)->getInsn();
         if (def && def->op == OP_MOV &&
             def->src(0).getFile() == FILE_IMMEDIATE) {
2290 vtmp = i->getSrc(1);
2291 i->setSrc(1, def->getSrc(0));
2292
2293 /* There's no post-RA dead code elimination, so do it here
2294 * XXX: if we add more code-removing post-RA passes, we might
2295 * want to create a post-RA dead-code elim pass */
2296 if (vtmp->refCount() == 0)
2297 delete_Instruction(bb->getProgram(), def);
2298
2299 break;
2300 }
2301 break;
2302 default:
2303 break;
2304 }
2305 }
2306
2307 return true;
2308 }
2309
2310 // =============================================================================
2311
// Common subexpression elimination; naive O(n^2) implementation.
2313 class LocalCSE : public Pass
2314 {
2315 private:
2316 virtual bool visit(BasicBlock *);
2317
2318 inline bool tryReplace(Instruction **, Instruction *);
2319
2320 DLList ops[OP_LAST + 1];
2321 };
2322
2323 class GlobalCSE : public Pass
2324 {
2325 private:
2326 virtual bool visit(BasicBlock *);
2327 };
2328
2329 bool
2330 Instruction::isActionEqual(const Instruction *that) const
2331 {
2332 if (this->op != that->op ||
2333 this->dType != that->dType ||
2334 this->sType != that->sType)
2335 return false;
2336 if (this->cc != that->cc)
2337 return false;
2338
2339 if (this->asTex()) {
2340 if (memcmp(&this->asTex()->tex,
2341 &that->asTex()->tex,
2342 sizeof(this->asTex()->tex)))
2343 return false;
2344 } else
2345 if (this->asCmp()) {
2346 if (this->asCmp()->setCond != that->asCmp()->setCond)
2347 return false;
2348 } else
2349 if (this->asFlow()) {
2350 return false;
2351 } else {
2352 if (this->ipa != that->ipa ||
2353 this->lanes != that->lanes ||
2354 this->perPatch != that->perPatch)
2355 return false;
2356 if (this->postFactor != that->postFactor)
2357 return false;
2358 }
2359
2360 if (this->subOp != that->subOp ||
2361 this->saturate != that->saturate ||
2362 this->rnd != that->rnd ||
2363 this->ftz != that->ftz ||
2364 this->dnz != that->dnz ||
2365 this->cache != that->cache ||
2366 this->mask != that->mask)
2367 return false;
2368
2369 return true;
2370 }
2371
2372 bool
2373 Instruction::isResultEqual(const Instruction *that) const
2374 {
2375 unsigned int d, s;
2376
2377 // NOTE: location of discard only affects tex with liveOnly and quadops
2378 if (!this->defExists(0) && this->op != OP_DISCARD)
2379 return false;
2380
2381 if (!isActionEqual(that))
2382 return false;
2383
2384 if (this->predSrc != that->predSrc)
2385 return false;
2386
2387 for (d = 0; this->defExists(d); ++d) {
2388 if (!that->defExists(d) ||
2389 !this->getDef(d)->equals(that->getDef(d), false))
2390 return false;
2391 }
2392 if (that->defExists(d))
2393 return false;
2394
2395 for (s = 0; this->srcExists(s); ++s) {
2396 if (!that->srcExists(s))
2397 return false;
2398 if (this->src(s).mod != that->src(s).mod)
2399 return false;
2400 if (!this->getSrc(s)->equals(that->getSrc(s), true))
2401 return false;
2402 }
2403 if (that->srcExists(s))
2404 return false;
2405
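   // Loads are only equal if they read from a file that is immutable during
   // execution; g[]/l[]/s[] contents may have been changed in between.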
2406 if (op == OP_LOAD || op == OP_VFETCH) {
2407 switch (src(0).getFile()) {
2408 case FILE_MEMORY_CONST:
2409 case FILE_SHADER_INPUT:
2410 return true;
2411 default:
2412 return false;
2413 }
2414 }
2415
2416 return true;
2417 }
2418
// Pull common expressions from the different incoming blocks through phis.
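// E.g. (hypothetical) if each predecessor computes an identical
// "add f32 $t<i>, a, b" whose sole use is "phi $p, $t0, $t1", one of the
// adds is moved into this block to define $p directly and the phi dies.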
2420 bool
2421 GlobalCSE::visit(BasicBlock *bb)
2422 {
2423 Instruction *phi, *next, *ik;
2424 int s;
2425
2426 // TODO: maybe do this with OP_UNION, too
2427
2428 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
2429 next = phi->next;
2430 if (phi->getSrc(0)->refCount() > 1)
2431 continue;
2432 ik = phi->getSrc(0)->getInsn();
2433 if (!ik)
2434 continue; // probably a function input
2435 for (s = 1; phi->srcExists(s); ++s) {
2436 if (phi->getSrc(s)->refCount() > 1)
2437 break;
2438 if (!phi->getSrc(s)->getInsn() ||
2439 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
2440 break;
2441 }
2442 if (!phi->srcExists(s)) {
2443 Instruction *entry = bb->getEntry();
2444 ik->bb->remove(ik);
2445 if (!entry || entry->op != OP_JOIN)
2446 bb->insertHead(ik);
2447 else
2448 bb->insertAfter(entry, ik);
2449 ik->setDef(0, phi->getDef(0));
2450 delete_Instruction(prog, phi);
2451 }
2452 }
2453
2454 return true;
2455 }
2456
2457 bool
2458 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
2459 {
2460 Instruction *old = *ptr;
2461
2462 // TODO: maybe relax this later (causes trouble with OP_UNION)
2463 if (i->isPredicated())
2464 return false;
2465
2466 if (!old->isResultEqual(i))
2467 return false;
2468
2469 for (int d = 0; old->defExists(d); ++d)
2470 old->def(d).replace(i->getDef(d), false);
2471 delete_Instruction(prog, old);
2472 *ptr = NULL;
2473 return true;
2474 }
2475
2476 bool
2477 LocalCSE::visit(BasicBlock *bb)
2478 {
2479 unsigned int replaced;
2480
2481 do {
2482 Instruction *ir, *next;
2483
2484 replaced = 0;
2485
2486 // will need to know the order of instructions
2487 int serial = 0;
2488 for (ir = bb->getFirst(); ir; ir = ir->next)
2489 ir->serial = serial++;
2490
2491 for (ir = bb->getEntry(); ir; ir = next) {
2492 int s;
2493 Value *src = NULL;
2494
2495 next = ir->next;
2496
2497 if (ir->fixed) {
2498 ops[ir->op].insert(ir);
2499 continue;
2500 }
2501
2502 for (s = 0; ir->srcExists(s); ++s)
2503 if (ir->getSrc(s)->asLValue())
2504 if (!src || ir->getSrc(s)->refCount() < src->refCount())
2505 src = ir->getSrc(s);
2506
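         // Scanning the uses of the least-referenced source keeps the
         // candidate set small; the ops[] lists below only serve as a
         // fallback for instructions without any LValue sources.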
2507 if (src) {
2508 for (Value::UseIterator it = src->uses.begin();
2509 it != src->uses.end(); ++it) {
2510 Instruction *ik = (*it)->getInsn();
2511 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
2512 if (tryReplace(&ir, ik))
2513 break;
2514 }
2515 } else {
2516 DLLIST_FOR_EACH(&ops[ir->op], iter)
2517 {
2518 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
2519 if (tryReplace(&ir, ik))
2520 break;
2521 }
2522 }
2523
2524 if (ir)
2525 ops[ir->op].insert(ir);
2526 else
2527 ++replaced;
2528 }
2529 for (unsigned int i = 0; i <= OP_LAST; ++i)
2530 ops[i].clear();
2531
2532 } while (replaced);
2533
2534 return true;
2535 }
2536
2537 // =============================================================================
2538
2539 // Remove computations of unused values.
2540 class DeadCodeElim : public Pass
2541 {
2542 public:
2543 bool buryAll(Program *);
2544
2545 private:
2546 virtual bool visit(BasicBlock *);
2547
2548 void checkSplitLoad(Instruction *ld); // for partially dead loads
2549
2550 unsigned int deadCount;
2551 };
2552
2553 bool
2554 DeadCodeElim::buryAll(Program *prog)
2555 {
2556 do {
2557 deadCount = 0;
2558 if (!this->run(prog, false, false))
2559 return false;
2560 } while (deadCount);
2561
2562 return true;
2563 }
2564
2565 bool
2566 DeadCodeElim::visit(BasicBlock *bb)
2567 {
2568 Instruction *next;
2569
2570 for (Instruction *i = bb->getFirst(); i; i = next) {
2571 next = i->next;
2572 if (i->isDead()) {
2573 ++deadCount;
2574 delete_Instruction(prog, i);
2575 } else
2576 if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
2577 checkSplitLoad(i);
2578 } else
2579 if (i->defExists(0) && !i->getDef(0)->refCount()) {
2580 if (i->op == OP_ATOM ||
2581 i->op == OP_SUREDP ||
2582 i->op == OP_SUREDB)
2583 i->setDef(0, NULL);
2584 }
2585 }
2586 return true;
2587 }
2588
2589 void
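// Sketch of a split (hypothetical syntax): for a vec4 load of which only
// the second and fourth results are still referenced,
//   ld v4f32 { _, $r0, _, $r1 }, c0[0x00]
// the leading dead def is skipped by bumping the offset, giving
//   ld f32 $r0, c0[0x04]
//   ld f32 $r1, c0[0x0c]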
2590 DeadCodeElim::checkSplitLoad(Instruction *ld1)
2591 {
2592 Instruction *ld2 = NULL; // can get at most 2 loads
2593 Value *def1[4];
2594 Value *def2[4];
2595 int32_t addr1, addr2;
2596 int32_t size1, size2;
2597 int d, n1, n2;
2598 uint32_t mask = 0xffffffff;
2599
2600 for (d = 0; ld1->defExists(d); ++d)
2601 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
2602 mask &= ~(1 << d);
2603 if (mask == 0xffffffff)
2604 return;
2605
2606 addr1 = ld1->getSrc(0)->reg.data.offset;
2607 n1 = n2 = 0;
2608 size1 = size2 = 0;
2609 for (d = 0; ld1->defExists(d); ++d) {
2610 if (mask & (1 << d)) {
2611 if (size1 && (addr1 & 0x7))
2612 break;
2613 def1[n1] = ld1->getDef(d);
2614 size1 += def1[n1++]->reg.size;
2615 } else
2616 if (!n1) {
2617 addr1 += ld1->getDef(d)->reg.size;
2618 } else {
2619 break;
2620 }
2621 }
2622 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
2623 if (mask & (1 << d)) {
2624 def2[n2] = ld1->getDef(d);
2625 size2 += def2[n2++]->reg.size;
2626 } else {
2627 assert(!n2);
2628 addr2 += ld1->getDef(d)->reg.size;
2629 }
2630 }
2631
2632 updateLdStOffset(ld1, addr1, func);
2633 ld1->setType(typeOfSize(size1));
2634 for (d = 0; d < 4; ++d)
2635 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
2636
2637 if (!n2)
2638 return;
2639
2640 ld2 = cloneShallow(func, ld1);
2641 updateLdStOffset(ld2, addr2, func);
2642 ld2->setType(typeOfSize(size2));
2643 for (d = 0; d < 4; ++d)
2644 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
2645
2646 ld1->bb->insertAfter(ld1, ld2);
2647 }
2648
2649 // =============================================================================
2650
2651 #define RUN_PASS(l, n, f) \
2652 if (level >= (l)) { \
2653 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
2654 INFO("PEEPHOLE: %s\n", #n); \
2655 n pass; \
2656 if (!pass.f(this)) \
2657 return false; \
2658 }
2659
2660 bool
2661 Program::optimizeSSA(int level)
2662 {
2663 RUN_PASS(1, DeadCodeElim, buryAll);
2664 RUN_PASS(1, CopyPropagation, run);
2665 RUN_PASS(2, GlobalCSE, run);
2666 RUN_PASS(1, LocalCSE, run);
2667 RUN_PASS(2, AlgebraicOpt, run);
2668 RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
2669 RUN_PASS(1, ConstantFolding, foldAll);
2670 RUN_PASS(1, LoadPropagation, run);
2671 RUN_PASS(2, MemoryOpt, run);
2672 RUN_PASS(2, LocalCSE, run);
2673 RUN_PASS(0, DeadCodeElim, buryAll);
2674
2675 return true;
2676 }
2677
2678 bool
2679 Program::optimizePostRA(int level)
2680 {
2681 RUN_PASS(2, FlatteningPass, run);
2682 if (getTarget()->getChipset() < 0xc0)
2683 RUN_PASS(2, NV50PostRaConstantFolding, run);
2684
2685 return true;
2686 }
2687
} // namespace nv50_ir