nv50/ir: disable mul+add to mad for precise instructions
[mesa.git] / src / gallium / drivers / nouveau / codegen / nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
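// A minimal sketch of the transformation (hypothetical IR, not from this
// file):
//   mov %r1, %r0
//   add %r2, %r1, %r3   -->   add %r2, %r0, %r3
// (the mov is deleted once its def has no remaining uses)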
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
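// Illustrative sketch (hypothetical 64-bit IR, not from this file):
//   split (%lo, %hi) <- %a (b64)
//   merge %b (b64) <- (%lo, %hi)   -->   all uses of %b read %a instead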
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT && insn->op != OP_SUB)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alphatesting, we can't reverse its
   // arguments as that will confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way,
   // optimistically, it will eventually be able to remove the instruction.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn)
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
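      // Worked example (editor's illustration): offset = 8, width = 8 gives
      // lshift = 16, rshift = 24, so (x << 16) >> 24 below isolates bits
      // [15:8]; the arithmetic shift sign-extends them for TYPE_S32.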
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. However it might not be anyways, as there may be other
      // restrictions, so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
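      // Worked example (editor's illustration): b = 0x0404 (width 4,
      // offset 4), a = 0x5, c = 0x12345678 -> bitmask = 0xf0 and
      // res = 0x50 | 0x12345608 = 0x12345658.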
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const Target *target = prog->getTarget();
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size;
      uint8_t bitsize = size * 8;
      uint32_t mask = (1ULL << bitsize) - 1;
      assert(bitsize <= 32);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= bitsize;
      }
      delete_Instruction(prog, i);
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
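            // (e.g. mul.hi.u32 x, 8 keeps the top 32 bits of the 64-bit
            //  product x << 3, i.e. x >> (32 - 3) = x >> 29; editor's
            //  illustration)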
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_FMA:
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      } else
      if (s == 1 && !imm0.isNegative() && imm0.isPow2() &&
          target->isOpSupported(OP_SHLADD, i->dType)) {
         i->op = OP_SHLADD;
         imm0.applyLog2();
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      /* fallthrough */
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

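         // Worked example (editor's illustration, d = 7): l = 3,
         // m = (2^32 * (8 - 7)) / 7 + 1 = 0x24924925, r = 1, s = 2, so
         // x / 7 = (hi + ((x - hi) >> 1)) >> 2 with hi = mulhi(x, m).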
         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

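         // Worked example (editor's illustration, d = 3): l = 2,
         // m = 2^33 / 3 + 1 - 2^32 = 0xaaaaaaab (as u32), so
         // x / 3 = ((mulhi(x, m) + x) >> 1) + (x < 0 ? 1 : 0).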
         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
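      // unsigned x mod a power of two is just a mask, e.g. x % 8 -> x & 7
      // (editor's note)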
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
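      /* Editor's illustration (not an upstream comment):
       *   a = set ge f32 x, y;  b = set eq u32 a, 0  ->  b = set lt f32 x, y
       * since "a == 0" is exactly the inverse of the original condition.
       */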
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return;
         if (imm0.reg.data.f32 != 1.0)
            return;
         if (cmp->dType != TYPE_U32)
            return;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
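         // e.g. (x >> 8) & 0xff -> extbf(x, 0x0808), i.e. 8 bits starting
         // at bit 8 (editor's illustration)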
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      } else if (src->op == OP_SHL &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(~imm0.reg.data.u32 + 1) &&
                 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return;
         if (si->src(!adds).mod != Modifier(0))
            return;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b)
         // propagate the add bit into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return;
   }
   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);

   BuildUtil bld;
};

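// Editor's summary of the pattern matched below: ABS(SUB(a, b)) (or
// ABS(ADD(a, NEG(b)))) becomes SAD(a, b, 0), using SAD's |a - b| + c form.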
void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
1661 abs->moveSources(1, 2); // move sources >=1 up by 2
1662 abs->op = OP_SAD;
1663 abs->setType(sub->dType);
1664 abs->setSrc(0, src0);
1665 abs->setSrc(1, src1);
1666 bld.setPosition(abs, false);
1667 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1668 }
1669
1670 bool
1671 AlgebraicOpt::handleADD(Instruction *add)
1672 {
1673 Value *src0 = add->getSrc(0);
1674 Value *src1 = add->getSrc(1);
1675
1676 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1677 return false;
1678
1679 bool changed = false;
1680 // we can't optimize to MAD if the add is precise
1681 if (!add->precise && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1682 changed = tryADDToMADOrSAD(add, OP_MAD);
1683 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1684 changed = tryADDToMADOrSAD(add, OP_SAD);
1685 return changed;
1686 }
1687
1688 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1689 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1690 bool
1691 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1692 {
1693 Value *src0 = add->getSrc(0);
1694 Value *src1 = add->getSrc(1);
1695 Value *src;
1696 int s;
1697 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1698 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1699 Modifier mod[4];
1700
1701 if (src0->refCount() == 1 &&
1702 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1703 s = 0;
1704 else
1705 if (src1->refCount() == 1 &&
1706 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1707 s = 1;
1708 else
1709 return false;
1710
1711 src = add->getSrc(s);
1712
1713 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1714 return false;
1715
1716 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1717 src->getInsn()->dnz || src->getInsn()->precise)
1718 return false;
1719
1720 if (toOp == OP_SAD) {
1721 ImmediateValue imm;
1722 if (!src->getInsn()->src(2).getImmediate(imm))
1723 return false;
1724 if (!imm.isInteger(0))
1725 return false;
1726 }
1727
1728 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1729 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1730 return false;
1731
1732 mod[0] = add->src(0).mod;
1733 mod[1] = add->src(1).mod;
1734 mod[2] = src->getUniqueInsn()->src(0).mod;
1735 mod[3] = src->getUniqueInsn()->src(1).mod;
1736
1737 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1738 return false;
1739
1740 add->op = toOp;
1741 add->subOp = src->getInsn()->subOp; // potentially mul-high
1742 add->dnz = src->getInsn()->dnz;
1743 add->dType = src->getInsn()->dType; // sign matters for imad hi
1744 add->sType = src->getInsn()->sType;
1745
1746 add->setSrc(2, add->src(s ? 0 : 1));
1747
1748 add->setSrc(0, src->getInsn()->getSrc(0));
1749 add->src(0).mod = mod[2] ^ mod[s];
1750 add->setSrc(1, src->getInsn()->getSrc(1));
1751 add->src(1).mod = mod[3];
1752
1753 return true;
1754 }
1755
1756 void
1757 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1758 {
1759 Value *src0 = minmax->getSrc(0);
1760 Value *src1 = minmax->getSrc(1);
1761
1762 if (src0 != src1 || src0->reg.file != FILE_GPR)
1763 return;
1764 if (minmax->src(0).mod == minmax->src(1).mod) {
1765 if (minmax->def(0).mayReplace(minmax->src(0))) {
1766 minmax->def(0).replace(minmax->src(0), false);
1767 minmax->bb->remove(minmax);
1768 } else {
1769 minmax->op = OP_CVT;
1770 minmax->setSrc(1, NULL);
1771 }
1772 } else {
1773 // TODO:
1774 // min(x, -x) = -abs(x)
1775 // min(x, -abs(x)) = -abs(x)
1776 // min(x, abs(x)) = x
1777 // max(x, -abs(x)) = x
1778 // max(x, abs(x)) = abs(x)
1779 // max(x, -x) = abs(x)
1780 }
1781 }
1782
1783 void
1784 AlgebraicOpt::handleRCP(Instruction *rcp)
1785 {
1786 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1787
1788 if (si && si->op == OP_RCP) {
1789 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1790 rcp->op = mod.getOp();
1791 rcp->setSrc(0, si->getSrc(0));
1792 }
1793 }
1794
1795 void
1796 AlgebraicOpt::handleSLCT(Instruction *slct)
1797 {
1798 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1799 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1800 slct->setSrc(0, slct->getSrc(1));
1801 } else
1802 if (slct->getSrc(0) != slct->getSrc(1)) {
1803 return;
1804 }
1805 slct->op = OP_MOV;
1806 slct->setSrc(1, NULL);
1807 slct->setSrc(2, NULL);
1808 }
1809
1810 void
1811 AlgebraicOpt::handleLOGOP(Instruction *logop)
1812 {
1813 Value *src0 = logop->getSrc(0);
1814 Value *src1 = logop->getSrc(1);
1815
1816 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1817 return;
1818
1819 if (src0 == src1) {
1820 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1821 logop->def(0).mayReplace(logop->src(0))) {
1822 logop->def(0).replace(logop->src(0), false);
1823 delete_Instruction(prog, logop);
1824 }
1825 } else {
1826 // try AND(SET, SET) -> SET_AND(SET)
1827 Instruction *set0 = src0->getInsn();
1828 Instruction *set1 = src1->getInsn();
1829
1830 if (!set0 || set0->fixed || !set1 || set1->fixed)
1831 return;
1832 if (set1->op != OP_SET) {
1833 Instruction *xchg = set0;
1834 set0 = set1;
1835 set1 = xchg;
1836 if (set1->op != OP_SET)
1837 return;
1838 }
1839 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
1840 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
1841 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
1842 return;
1843 if (set0->op != OP_SET &&
1844 set0->op != OP_SET_AND &&
1845 set0->op != OP_SET_OR &&
1846 set0->op != OP_SET_XOR)
1847 return;
1848 if (set0->getDef(0)->refCount() > 1 &&
1849 set1->getDef(0)->refCount() > 1)
1850 return;
1851 if (set0->getPredicate() || set1->getPredicate())
1852 return;
1853 // check that they don't source each other
1854 for (int s = 0; s < 2; ++s)
1855 if (set0->getSrc(s) == set1->getDef(0) ||
1856 set1->getSrc(s) == set0->getDef(0))
1857 return;
1858
1859 set0 = cloneForward(func, set0);
1860 set1 = cloneShallow(func, set1);
1861 logop->bb->insertAfter(logop, set1);
1862 logop->bb->insertAfter(logop, set0);
1863
1864 set0->dType = TYPE_U8;
1865 set0->getDef(0)->reg.file = FILE_PREDICATE;
1866 set0->getDef(0)->reg.size = 1;
1867 set1->setSrc(2, set0->getDef(0));
1868 set1->op = redOp;
1869 set1->setDef(0, logop->getDef(0));
1870 delete_Instruction(prog, logop);
1871 }
1872 }
1873
1874 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1875 // nv50:
1876 // F2I(NEG(I2F(ABS(SET))))
1877 void
1878 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
1879 {
1880 Instruction *insn = cvt->getSrc(0)->getInsn();
1881 if (cvt->sType != TYPE_F32 ||
1882 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
1883 return;
1884 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1885 return;
1886 if (insn->src(0).mod != Modifier(0))
1887 return;
1888 insn = insn->getSrc(0)->getInsn();
1889
1890 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
1891 if (insn && insn->op == OP_CVT &&
1892 insn->dType == TYPE_F32 &&
1893 insn->sType == TYPE_S32) {
1894 insn = insn->getSrc(0)->getInsn();
1895 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
1896 insn->src(0).mod)
1897 return;
1898 insn = insn->getSrc(0)->getInsn();
1899 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
1900 return;
1901 } else
1902 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
1903 return;
1904 }
1905
1906 Instruction *bset = cloneShallow(func, insn);
1907 bset->dType = TYPE_U32;
1908 bset->setDef(0, cvt->getDef(0));
1909 cvt->bb->insertAfter(cvt, bset);
1910 delete_Instruction(prog, cvt);
1911 }
1912
1913 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
1914 // does a type conversion, this becomes trickier as there might be range
1915 // changes/etc. We could handle those in theory as long as the range was being
1916 // reduced or kept the same.
1917 void
1918 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
1919 {
1920 Instruction *insn = cvt->getSrc(0)->getInsn();
1921 RoundMode rnd = insn->rnd;
1922
1923 if (insn->saturate ||
1924 insn->subOp ||
1925 insn->dType != insn->sType ||
1926 insn->dType != cvt->sType)
1927 return;
1928
1929 switch (insn->op) {
1930 case OP_CEIL:
1931 rnd = ROUND_PI;
1932 break;
1933 case OP_FLOOR:
1934 rnd = ROUND_MI;
1935 break;
1936 case OP_TRUNC:
1937 rnd = ROUND_ZI;
1938 break;
1939 case OP_CVT:
1940 break;
1941 default:
1942 return;
1943 }
1944
1945 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
1946 rnd = (RoundMode)(rnd & 3);
1947
1948 cvt->rnd = rnd;
1949 cvt->setSrc(0, insn->getSrc(0));
1950 cvt->src(0).mod *= insn->src(0).mod;
1951 cvt->sType = insn->sType;
1952 }
1953
1954 // Some shaders extract packed bytes out of words and convert them to
1955 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
1956 // nv50 for word sizes.
1957 //
1958 // CVT(EXTBF(x, byte/word))
1959 // CVT(AND(bytemask, x))
1960 // CVT(AND(bytemask, SHR(x, 8/16/24)))
1961 // CVT(SHR(x, 16/24))
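//
// e.g. extracting the third byte, in hypothetical IR notation:
//   %s = shr u32 %x, 16
//   %m = and u32 %s, 0xff
//   %f = cvt f32 u32 %m
// becomes a converting extract of byte 2 (subOp = offset / 8):
//   %f = cvt f32 u8 %x   (subOp 2)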
1962 void
1963 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
1964 {
1965 Instruction *insn = cvt->getSrc(0)->getInsn();
1966 ImmediateValue imm;
1967 Value *arg = NULL;
1968 unsigned width, offset;
1969 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
1970 return;
1971 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
1972 width = (imm.reg.data.u32 >> 8) & 0xff;
1973 offset = imm.reg.data.u32 & 0xff;
1974 arg = insn->getSrc(0);
1975
1976 if (width != 8 && width != 16)
1977 return;
1978 if (width == 8 && offset & 0x7)
1979 return;
1980 if (width == 16 && offset & 0xf)
1981 return;
1982 } else if (insn->op == OP_AND) {
1983 int s;
1984 if (insn->src(0).getImmediate(imm))
1985 s = 0;
1986 else if (insn->src(1).getImmediate(imm))
1987 s = 1;
1988 else
1989 return;
1990
1991 if (imm.reg.data.u32 == 0xff)
1992 width = 8;
1993 else if (imm.reg.data.u32 == 0xffff)
1994 width = 16;
1995 else
1996 return;
1997
1998 arg = insn->getSrc(!s);
1999 Instruction *shift = arg->getInsn();
2000 offset = 0;
2001 if (shift && shift->op == OP_SHR &&
2002 shift->sType == cvt->sType &&
2003 shift->src(1).getImmediate(imm) &&
2004 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2005 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2006 arg = shift->getSrc(0);
2007 offset = imm.reg.data.u32;
2008 }
2009 // We just AND'd the high bits away, which means this is effectively an
2010 // unsigned value.
2011 cvt->sType = TYPE_U32;
2012 } else if (insn->op == OP_SHR &&
2013 insn->sType == cvt->sType &&
2014 insn->src(1).getImmediate(imm)) {
2015 arg = insn->getSrc(0);
2016 if (imm.reg.data.u32 == 24) {
2017 width = 8;
2018 offset = 24;
2019 } else if (imm.reg.data.u32 == 16) {
2020 width = 16;
2021 offset = 16;
2022 } else {
2023 return;
2024 }
2025 }
2026
2027 if (!arg)
2028 return;
2029
2030 // Irrespective of what came earlier, we can undo a shift on the argument
2031 // by adjusting the offset.
2032 Instruction *shift = arg->getInsn();
2033 if (shift && shift->op == OP_SHL &&
2034 shift->src(1).getImmediate(imm) &&
2035 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2036 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2037 imm.reg.data.u32 <= offset) {
2038 arg = shift->getSrc(0);
2039 offset -= imm.reg.data.u32;
2040 }
2041
2042 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2043 // annoying to detect them.
2044
2045 if (width == 8) {
2046 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2047 } else {
2048 assert(width == 16);
2049 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2050 }
2051 cvt->setSrc(0, arg);
2052 cvt->subOp = offset >> 3;
2053 }
2054
// SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm
// (only if the combined immediate fits into the signed 6-bit range [-32, 31])
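//
// e.g. in hypothetical IR notation:
//   %t = add u32 %b, 4
//   suclamp %d, %t, %k, 0
// becomes
//   suclamp %d, %b, %k, 4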
2056 void
2057 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2058 {
2059 ImmediateValue imm;
2060 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2061 int s;
2062 Instruction *add;
2063
2064 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2065
2066 // look for ADD (TODO: only count references by non-SUCLAMP)
2067 if (insn->getSrc(0)->refCount() > 1)
2068 return;
2069 add = insn->getSrc(0)->getInsn();
2070 if (!add || add->op != OP_ADD ||
2071 (add->dType != TYPE_U32 &&
2072 add->dType != TYPE_S32))
2073 return;
2074
2075 // look for immediate
2076 for (s = 0; s < 2; ++s)
2077 if (add->src(s).getImmediate(imm))
2078 break;
2079 if (s >= 2)
2080 return;
2081 s = s ? 0 : 1;
2082 // determine if immediate fits
2083 val += imm.reg.data.s32;
2084 if (val > 31 || val < -32)
2085 return;
2086 // determine if other addend fits
2087 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2088 return;
2089
2090 bld.setPosition(insn, false); // make sure bld is init'ed
2091 // replace sources
2092 insn->setSrc(2, bld.mkImm(val));
2093 insn->setSrc(0, add->getSrc(s));
2094 }
2095
2096 // NEG(AND(SET, 1)) -> SET
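//
// Integer SET produces 0 or -1 (all bits set), so in hypothetical IR notation:
//   %s = set u32 lt %a, %b   -- 0 or 0xffffffff
//   %t = and u32 %s, 1       -- 0 or 1
//   %r = neg s32 %t          -- 0 or -1 again
// the NEG's result is identical to %s, and its uses can be redirected there.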
2097 void
AlgebraicOpt::handleNEG(Instruction *i)
{
2099 Instruction *src = i->getSrc(0)->getInsn();
2100 ImmediateValue imm;
2101 int b;
2102
2103 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2104 return;
2105
2106 if (src->src(0).getImmediate(imm))
2107 b = 1;
2108 else if (src->src(1).getImmediate(imm))
2109 b = 0;
2110 else
2111 return;
2112
2113 if (!imm.isInteger(1))
2114 return;
2115
   Instruction *set = src->getSrc(b)->getInsn();
   if (set && (set->op == OP_SET || set->op == OP_SET_AND ||
       set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
       !isFloatType(set->dType)) {
2120 i->def(0).replace(set->getDef(0), false);
2121 }
2122 }
2123
2124 bool
2125 AlgebraicOpt::visit(BasicBlock *bb)
2126 {
2127 Instruction *next;
2128 for (Instruction *i = bb->getEntry(); i; i = next) {
2129 next = i->next;
2130 switch (i->op) {
2131 case OP_ABS:
2132 handleABS(i);
2133 break;
2134 case OP_ADD:
2135 handleADD(i);
2136 break;
2137 case OP_RCP:
2138 handleRCP(i);
2139 break;
2140 case OP_MIN:
2141 case OP_MAX:
2142 handleMINMAX(i);
2143 break;
2144 case OP_SLCT:
2145 handleSLCT(i);
2146 break;
2147 case OP_AND:
2148 case OP_OR:
2149 case OP_XOR:
2150 handleLOGOP(i);
2151 break;
2152 case OP_CVT:
2153 handleCVT_NEG(i);
2154 handleCVT_CVT(i);
2155 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2156 handleCVT_EXTBF(i);
2157 break;
2158 case OP_SUCLAMP:
2159 handleSUCLAMP(i);
2160 break;
2161 case OP_NEG:
2162 handleNEG(i);
2163 break;
2164 default:
2165 break;
2166 }
2167 }
2168
2169 return true;
2170 }
2171
2172 // =============================================================================
2173
2174 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2175 class LateAlgebraicOpt : public Pass
2176 {
2177 private:
2178 virtual bool visit(Instruction *);
2179
2180 void handleADD(Instruction *);
2181 bool tryADDToSHLADD(Instruction *);
2182 };
2183
2184 void
2185 LateAlgebraicOpt::handleADD(Instruction *add)
2186 {
2187 Value *src0 = add->getSrc(0);
2188 Value *src1 = add->getSrc(1);
2189
2190 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2191 return;
2192
2193 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2194 tryADDToSHLADD(add);
2195 }
2196
2197 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
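//
// e.g. in hypothetical IR notation:
//   %t = shl u32 %a, 4
//   %d = add u32 %t, %c
// becomes
//   %d = shladd u32 %a, 4, %c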
2198 bool
2199 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2200 {
2201 Value *src0 = add->getSrc(0);
2202 Value *src1 = add->getSrc(1);
2203 ImmediateValue imm;
2204 Instruction *shl;
2205 Value *src;
2206 int s;
2207
2208 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2209 || isFloatType(add->dType))
2210 return false;
2211
2212 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2213 s = 0;
2214 else
2215 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2216 s = 1;
2217 else
2218 return false;
2219
2220 src = add->getSrc(s);
2221 shl = src->getUniqueInsn();
2222
2223 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2224 return false;
2225
2226 if (!shl->src(1).getImmediate(imm))
2227 return false;
2228
2229 add->op = OP_SHLADD;
2230 add->setSrc(2, add->src(!s));
2231 // SHL can't have any modifiers, but the ADD source may have had
2232 // one. Preserve it.
2233 add->setSrc(0, shl->getSrc(0));
2234 if (s == 1)
2235 add->src(0).mod = add->src(1).mod;
2236 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2237 add->src(1).mod = Modifier(0);
2238
2239 return true;
2240 }
2241
2242 bool
2243 LateAlgebraicOpt::visit(Instruction *i)
2244 {
2245 switch (i->op) {
2246 case OP_ADD:
2247 handleADD(i);
2248 break;
2249 default:
2250 break;
2251 }
2252
2253 return true;
2254 }
2255
2256 // =============================================================================
2257
2258 // Split 64-bit MUL and MAD
2259 class Split64BitOpPreRA : public Pass
2260 {
2261 private:
2262 virtual bool visit(BasicBlock *);
2263 void split64MulMad(Function *, Instruction *, DataType);
2264
2265 BuildUtil bld;
2266 };
2267
2268 bool
2269 Split64BitOpPreRA::visit(BasicBlock *bb)
2270 {
2271 Instruction *i, *next;
2272 Modifier mod;
2273
2274 for (i = bb->getEntry(); i; i = next) {
2275 next = i->next;
2276
2277 DataType hTy;
2278 switch (i->dType) {
2279 case TYPE_U64: hTy = TYPE_U32; break;
2280 case TYPE_S64: hTy = TYPE_S32; break;
2281 default:
2282 continue;
2283 }
2284
2285 if (i->op == OP_MAD || i->op == OP_MUL)
2286 split64MulMad(func, i, hTy);
2287 }
2288
2289 return true;
2290 }
2291
2292 void
2293 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2294 {
2295 assert(i->op == OP_MAD || i->op == OP_MUL);
2296 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2297 assert(typeSizeof(hTy) == 4);
2298
2299 bld.setPosition(i, true);
2300
2301 Value *zero = bld.mkImm(0u);
2302 Value *carry = bld.getSSA(1, FILE_FLAGS);
2303
   // We want to compute `d = a * b (+ c)?`, where d is a 64-bit value and a,
   // b and c may be either 32-bit or 64-bit values, using only 32-bit
   // operations. This gives the following operations:
   // * `d.low = low(a.low * b.low) (+ c.low)?`
   // * `d.high = low(a.high * b.low) + low(a.low * b.high)
   //           + high(a.low * b.low) (+ c.high)?`
   //
   // To compute the high bits, we can split this into the following
   // operations:
   // * `tmp1 = low(a.high * b.low) (+ c.high)?`
   // * `tmp2 = low(a.low * b.high) + tmp1`
   // * `d.high = high(a.low * b.low) + tmp2`
   //
   // mkSplit puts the lower bits at index 0 and the higher bits at index 1.
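   //
   // Sanity check of the decomposition (splitting a and b at 32 bits):
   //   a * b = (a.high * 2^32 + a.low) * (b.high * 2^32 + b.low)
   //         = a.high * b.high * 2^64                    (vanishes mod 2^64)
   //         + (a.high * b.low + a.low * b.high) * 2^32
   //         + a.low * b.low
   // which, taken mod 2^64, is exactly the d.low/d.high formula above.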
2317
2318 Value *op1[2];
2319 if (i->getSrc(0)->reg.size == 8)
2320 bld.mkSplit(op1, 4, i->getSrc(0));
2321 else {
2322 op1[0] = i->getSrc(0);
2323 op1[1] = zero;
2324 }
2325 Value *op2[2];
2326 if (i->getSrc(1)->reg.size == 8)
2327 bld.mkSplit(op2, 4, i->getSrc(1));
2328 else {
2329 op2[0] = i->getSrc(1);
2330 op2[1] = zero;
2331 }
2332
2333 Value *op3[2] = { NULL, NULL };
2334 if (i->op == OP_MAD) {
2335 if (i->getSrc(2)->reg.size == 8)
2336 bld.mkSplit(op3, 4, i->getSrc(2));
2337 else {
2338 op3[0] = i->getSrc(2);
2339 op3[1] = zero;
2340 }
2341 }
2342
2343 Value *tmpRes1Hi = bld.getSSA();
2344 if (i->op == OP_MAD)
2345 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2346 else
2347 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2348
2349 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2350
2351 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2352
   // If it was a MAD, add the carry from the low bits: adding c.low may
   // overflow into the high word. No carry is needed for a MUL, since the
   // low word is exact and high(a.low * b.low) was already added to d.high.
2356 if (i->op == OP_MAD)
2357 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2358 else
2359 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2360
2361 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2362 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2363 if (i->op == OP_MAD)
2364 hiPart3->setFlagsSrc(3, carry);
2365
2366 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2367
2368 delete_Instruction(fn->getProgram(), i);
2369 }
2370
2371 // =============================================================================
2372
2373 static inline void
2374 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2375 {
2376 if (offset != ldst->getSrc(0)->reg.data.offset) {
2377 if (ldst->getSrc(0)->refCount() > 1)
2378 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2379 ldst->getSrc(0)->reg.data.offset = offset;
2380 }
2381 }
2382
2383 // Combine loads and stores, forward stores to loads where possible.
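//
// e.g. two adjacent 32-bit loads (hypothetical IR notation):
//   %a = ld u32 c0[0x10]
//   %b = ld u32 c0[0x14]
// can be combined into one 64-bit load:
//   %a, %b = ld u64 c0[0x10]
// and a load from an offset just written by a store can reuse the stored
// value instead of reloading it.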
2384 class MemoryOpt : public Pass
2385 {
2386 private:
2387 class Record
2388 {
2389 public:
2390 Record *next;
2391 Instruction *insn;
2392 const Value *rel[2];
2393 const Value *base;
2394 int32_t offset;
2395 int8_t fileIndex;
2396 uint8_t size;
2397 bool locked;
2398 Record *prev;
2399
2400 bool overlaps(const Instruction *ldst) const;
2401
2402 inline void link(Record **);
2403 inline void unlink(Record **);
2404 inline void set(const Instruction *ldst);
2405 };
2406
2407 public:
2408 MemoryOpt();
2409
2410 Record *loads[DATA_FILE_COUNT];
2411 Record *stores[DATA_FILE_COUNT];
2412
2413 MemoryPool recordPool;
2414
2415 private:
2416 virtual bool visit(BasicBlock *);
2417 bool runOpt(BasicBlock *);
2418
2419 Record **getList(const Instruction *);
2420
2421 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2422
2423 // merge @insn into load/store instruction from @rec
2424 bool combineLd(Record *rec, Instruction *ld);
2425 bool combineSt(Record *rec, Instruction *st);
2426
2427 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2428 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2429 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2430
2431 void addRecord(Instruction *ldst);
2432 void purgeRecords(Instruction *const st, DataFile);
2433 void lockStores(Instruction *const ld);
2434 void reset();
2435
2436 private:
2437 Record *prevRecord;
2438 };
2439
2440 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2441 {
2442 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2443 loads[i] = NULL;
2444 stores[i] = NULL;
2445 }
2446 prevRecord = NULL;
2447 }
2448
2449 void
2450 MemoryOpt::reset()
2451 {
2452 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2453 Record *it, *next;
2454 for (it = loads[i]; it; it = next) {
2455 next = it->next;
2456 recordPool.release(it);
2457 }
2458 loads[i] = NULL;
2459 for (it = stores[i]; it; it = next) {
2460 next = it->next;
2461 recordPool.release(it);
2462 }
2463 stores[i] = NULL;
2464 }
2465 }
2466
2467 bool
2468 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2469 {
2470 int32_t offRc = rec->offset;
2471 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2472 int sizeRc = rec->size;
2473 int sizeLd = typeSizeof(ld->dType);
2474 int size = sizeRc + sizeLd;
2475 int d, j;
2476
2477 if (!prog->getTarget()->
2478 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2479 return false;
2480 // no unaligned loads
2481 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2482 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2483 return false;
   // for compute, indirect loads are not guaranteed to be aligned
2485 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2486 return false;
2487
2488 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2489
2490 // lock any stores that overlap with the load being merged into the
2491 // existing record.
2492 lockStores(ld);
2493
2494 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2495
2496 if (offLd < offRc) {
2497 int sz;
2498 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2499 // d: nr of definitions in ld
2500 // j: nr of definitions in rec->insn, move:
2501 for (d = d + j - 1; j > 0; --j, --d)
2502 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2503
2504 if (rec->insn->getSrc(0)->refCount() > 1)
2505 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2506 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2507
2508 d = 0;
2509 } else {
2510 d = j;
2511 }
2512 // move definitions of @ld to @rec->insn
2513 for (j = 0; sizeLd; ++j, ++d) {
2514 sizeLd -= ld->getDef(j)->reg.size;
2515 rec->insn->setDef(d, ld->getDef(j));
2516 }
2517
2518 rec->size = size;
2519 rec->insn->getSrc(0)->reg.size = size;
2520 rec->insn->setType(typeOfSize(size));
2521
2522 delete_Instruction(prog, ld);
2523
2524 return true;
2525 }
2526
2527 bool
2528 MemoryOpt::combineSt(Record *rec, Instruction *st)
2529 {
2530 int32_t offRc = rec->offset;
2531 int32_t offSt = st->getSrc(0)->reg.data.offset;
2532 int sizeRc = rec->size;
2533 int sizeSt = typeSizeof(st->dType);
2534 int s = sizeSt / 4;
2535 int size = sizeRc + sizeSt;
2536 int j, k;
2537 Value *src[4]; // no modifiers in ValueRef allowed for st
2538 Value *extra[3];
2539
2540 if (!prog->getTarget()->
2541 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2542 return false;
2543 // no unaligned stores
2544 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2545 return false;
   // for compute, indirect stores are not guaranteed to be aligned
2547 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2548 return false;
2549
2550 // remove any existing load/store records for the store being merged into
2551 // the existing record.
2552 purgeRecords(st, DATA_FILE_COUNT);
2553
2554 st->takeExtraSources(0, extra); // save predicate and indirect address
2555
2556 if (offRc < offSt) {
2557 // save values from @st
2558 for (s = 0; sizeSt; ++s) {
2559 sizeSt -= st->getSrc(s + 1)->reg.size;
2560 src[s] = st->getSrc(s + 1);
2561 }
2562 // set record's values as low sources of @st
2563 for (j = 1; sizeRc; ++j) {
2564 sizeRc -= rec->insn->getSrc(j)->reg.size;
2565 st->setSrc(j, rec->insn->getSrc(j));
2566 }
2567 // set saved values as high sources of @st
2568 for (k = j, j = 0; j < s; ++j)
2569 st->setSrc(k++, src[j]);
2570
2571 updateLdStOffset(st, offRc, func);
2572 } else {
2573 for (j = 1; sizeSt; ++j)
2574 sizeSt -= st->getSrc(j)->reg.size;
2575 for (s = 1; sizeRc; ++j, ++s) {
2576 sizeRc -= rec->insn->getSrc(s)->reg.size;
2577 st->setSrc(j, rec->insn->getSrc(s));
2578 }
2579 rec->offset = offSt;
2580 }
2581 st->putExtraSources(0, extra); // restore pointer and predicate
2582
2583 delete_Instruction(prog, rec->insn);
2584 rec->insn = st;
2585 rec->size = size;
2586 rec->insn->getSrc(0)->reg.size = size;
2587 rec->insn->setType(typeOfSize(size));
2588 return true;
2589 }
2590
2591 void
2592 MemoryOpt::Record::set(const Instruction *ldst)
2593 {
2594 const Symbol *mem = ldst->getSrc(0)->asSym();
2595 fileIndex = mem->reg.fileIndex;
2596 rel[0] = ldst->getIndirect(0, 0);
2597 rel[1] = ldst->getIndirect(0, 1);
2598 offset = mem->reg.data.offset;
2599 base = mem->getBase();
2600 size = typeSizeof(ldst->sType);
2601 }
2602
2603 void
2604 MemoryOpt::Record::link(Record **list)
2605 {
2606 next = *list;
2607 if (next)
2608 next->prev = this;
2609 prev = NULL;
2610 *list = this;
2611 }
2612
2613 void
2614 MemoryOpt::Record::unlink(Record **list)
2615 {
2616 if (next)
2617 next->prev = prev;
2618 if (prev)
2619 prev->next = next;
2620 else
2621 *list = next;
2622 }
2623
2624 MemoryOpt::Record **
2625 MemoryOpt::getList(const Instruction *insn)
2626 {
2627 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2628 return &loads[insn->src(0).getFile()];
2629 return &stores[insn->src(0).getFile()];
2630 }
2631
2632 void
2633 MemoryOpt::addRecord(Instruction *i)
2634 {
2635 Record **list = getList(i);
2636 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2637
2638 it->link(list);
2639 it->set(i);
2640 it->insn = i;
2641 it->locked = false;
2642 }
2643
2644 MemoryOpt::Record *
2645 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2646 {
2647 const Symbol *sym = insn->getSrc(0)->asSym();
2648 const int size = typeSizeof(insn->sType);
2649 Record *rec = NULL;
2650 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2651
2652 for (; it; it = it->next) {
2653 if (it->locked && insn->op != OP_LOAD && insn->op != OP_VFETCH)
2654 continue;
2655 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2656 it->rel[0] != insn->getIndirect(0, 0) ||
2657 it->fileIndex != sym->reg.fileIndex ||
2658 it->rel[1] != insn->getIndirect(0, 1))
2659 continue;
2660
2661 if (it->offset < sym->reg.data.offset) {
2662 if (it->offset + it->size >= sym->reg.data.offset) {
2663 isAdj = (it->offset + it->size == sym->reg.data.offset);
2664 if (!isAdj)
2665 return it;
2666 if (!(it->offset & 0x7))
2667 rec = it;
2668 }
2669 } else {
2670 isAdj = it->offset != sym->reg.data.offset;
2671 if (size <= it->size && !isAdj)
2672 return it;
2673 else
2674 if (!(sym->reg.data.offset & 0x7))
2675 if (it->offset - size <= sym->reg.data.offset)
2676 rec = it;
2677 }
2678 }
2679 return rec;
2680 }
2681
2682 bool
2683 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2684 {
2685 Instruction *st = rec->insn;
2686 int32_t offSt = rec->offset;
2687 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2688 int d, s;
2689
2690 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2691 offSt += st->getSrc(s)->reg.size;
2692 if (offSt != offLd)
2693 return false;
2694
2695 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2696 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
2697 return false;
2698 if (st->getSrc(s)->reg.file != FILE_GPR)
2699 return false;
2700 ld->def(d).replace(st->src(s), false);
2701 }
2702 ld->bb->remove(ld);
2703 return true;
2704 }
2705
2706 bool
2707 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
2708 {
2709 Instruction *ldR = rec->insn;
2710 int32_t offR = rec->offset;
2711 int32_t offE = ldE->getSrc(0)->reg.data.offset;
2712 int dR, dE;
2713
2714 assert(offR <= offE);
2715 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
2716 offR += ldR->getDef(dR)->reg.size;
2717 if (offR != offE)
2718 return false;
2719
2720 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
2721 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
2722 return false;
2723 ldE->def(dE).replace(ldR->getDef(dR), false);
2724 }
2725
2726 delete_Instruction(prog, ldE);
2727 return true;
2728 }
2729
2730 bool
2731 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
2732 {
2733 const Instruction *const ri = rec->insn;
2734 Value *extra[3];
2735
2736 int32_t offS = st->getSrc(0)->reg.data.offset;
2737 int32_t offR = rec->offset;
2738 int32_t endS = offS + typeSizeof(st->dType);
2739 int32_t endR = offR + typeSizeof(ri->dType);
2740
2741 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
2742
2743 st->takeExtraSources(0, extra);
2744
2745 if (offR < offS) {
2746 Value *vals[10];
2747 int s, n;
2748 int k = 0;
2749 // get non-replaced sources of ri
2750 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
2751 vals[k++] = ri->getSrc(s);
2752 n = s;
2753 // get replaced sources of st
2754 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
2755 vals[k++] = st->getSrc(s);
2756 // skip replaced sources of ri
2757 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
2758 // get non-replaced sources after values covered by st
2759 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
2760 vals[k++] = ri->getSrc(s);
2761 assert((unsigned int)k <= ARRAY_SIZE(vals));
2762 for (s = 0; s < k; ++s)
2763 st->setSrc(s + 1, vals[s]);
2764 st->setSrc(0, ri->getSrc(0));
2765 } else
2766 if (endR > endS) {
2767 int j, s;
2768 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
2769 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
2770 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
2771 st->setSrc(s++, ri->getSrc(j));
2772 }
2773 st->putExtraSources(0, extra);
2774
2775 delete_Instruction(prog, rec->insn);
2776
2777 rec->insn = st;
2778 rec->offset = st->getSrc(0)->reg.data.offset;
2779
2780 st->setType(typeOfSize(rec->size));
2781
2782 return true;
2783 }
2784
2785 bool
2786 MemoryOpt::Record::overlaps(const Instruction *ldst) const
2787 {
2788 Record that;
2789 that.set(ldst);
2790
2791 // This assumes that images/buffers can't overlap. They can.
2792 // TODO: Plumb the restrict logic through, and only skip when it's a
2793 // restrict situation, or there can implicitly be no writes.
2794 if (this->fileIndex != that.fileIndex && this->rel[1] == that.rel[1])
2795 return false;
2796
2797 if (this->rel[0] || that.rel[0])
2798 return this->base == that.base;
2799
2800 return
2801 (this->offset < that.offset + that.size) &&
2802 (this->offset + this->size > that.offset);
2803 }
2804
// Once a load @ld has read from a location, earlier stores to that location
// become locked: they may no longer be eliminated by, or merged into, later
// stores, since that would change the value observed by @ld.
// The stored value can, however, still be used to determine the value
// returned by future loads.
2810 void
2811 MemoryOpt::lockStores(Instruction *const ld)
2812 {
2813 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
2814 if (!r->locked && r->overlaps(ld))
2815 r->locked = true;
2816 }
2817
// Prior loads from the location of @st are no longer valid.
// Stores to the location of @st may no longer be used to derive the value
// at that location, nor be coalesced into later stores.
2821 void
2822 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
2823 {
2824 if (st)
2825 f = st->src(0).getFile();
2826
2827 for (Record *r = loads[f]; r; r = r->next)
2828 if (!st || r->overlaps(st))
2829 r->unlink(&loads[f]);
2830
2831 for (Record *r = stores[f]; r; r = r->next)
2832 if (!st || r->overlaps(st))
2833 r->unlink(&stores[f]);
2834 }
2835
2836 bool
2837 MemoryOpt::visit(BasicBlock *bb)
2838 {
2839 bool ret = runOpt(bb);
   // Run again; one pass won't combine four 32-bit ld/st into a single 128-bit
   // ld/st where 96-bit memory operations are forbidden.
2842 if (ret)
2843 ret = runOpt(bb);
2844 return ret;
2845 }
2846
2847 bool
2848 MemoryOpt::runOpt(BasicBlock *bb)
2849 {
2850 Instruction *ldst, *next;
2851 Record *rec;
2852 bool isAdjacent = true;
2853
2854 for (ldst = bb->getEntry(); ldst; ldst = next) {
2855 bool keep = true;
2856 bool isLoad = true;
2857 next = ldst->next;
2858
2859 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
2860 if (ldst->isDead()) {
2861 // might have been produced by earlier optimization
2862 delete_Instruction(prog, ldst);
2863 continue;
2864 }
2865 } else
2866 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
2867 if (typeSizeof(ldst->dType) == 4 &&
2868 ldst->src(1).getFile() == FILE_GPR &&
2869 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
2870 delete_Instruction(prog, ldst);
2871 continue;
2872 }
2873 isLoad = false;
2874 } else {
2875 // TODO: maybe have all fixed ops act as barrier ?
2876 if (ldst->op == OP_CALL ||
2877 ldst->op == OP_BAR ||
2878 ldst->op == OP_MEMBAR) {
2879 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2880 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2881 purgeRecords(NULL, FILE_MEMORY_SHARED);
2882 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2883 } else
2884 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
2885 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
2886 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2887 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2888 purgeRecords(NULL, FILE_MEMORY_SHARED);
2889 } else {
2890 purgeRecords(NULL, ldst->src(0).getFile());
2891 }
2892 } else
2893 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
2894 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2895 }
2896 continue;
2897 }
2898 if (ldst->getPredicate()) // TODO: handle predicated ld/st
2899 continue;
2900 if (ldst->perPatch) // TODO: create separate per-patch lists
2901 continue;
2902
2903 if (isLoad) {
2904 DataFile file = ldst->src(0).getFile();
2905
         // if loading from l[]/g[], look for a previous store to eliminate
         // the reload
2907 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
2908 // TODO: shared memory ?
2909 rec = findRecord(ldst, false, isAdjacent);
2910 if (rec && !isAdjacent)
2911 keep = !replaceLdFromSt(ldst, rec);
2912 }
2913
2914 // or look for ld from the same location and replace this one
2915 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
2916 if (rec) {
2917 if (!isAdjacent)
2918 keep = !replaceLdFromLd(ldst, rec);
2919 else
2920 // or combine a previous load with this one
2921 keep = !combineLd(rec, ldst);
2922 }
2923 if (keep)
2924 lockStores(ldst);
2925 } else {
2926 rec = findRecord(ldst, false, isAdjacent);
2927 if (rec) {
2928 if (!isAdjacent)
2929 keep = !replaceStFromSt(ldst, rec);
2930 else
2931 keep = !combineSt(rec, ldst);
2932 }
2933 if (keep)
2934 purgeRecords(ldst, DATA_FILE_COUNT);
2935 }
2936 if (keep)
2937 addRecord(ldst);
2938 }
2939 reset();
2940
2941 return true;
2942 }
2943
2944 // =============================================================================
2945
2946 // Turn control flow into predicated instructions (after register allocation !).
2947 // TODO:
2948 // Could move this to before register allocation on NVC0 and also handle nested
2949 // constructs.
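//
// e.g. a short two-sided conditional (hypothetical, post-RA):
//   @p0 bra ELSE
//   mov $r0, 1
//   bra END
//   ELSE: mov $r0, 2
//   END: ...
// is flattened into straight-line predicated code:
//   @!p0 mov $r0, 1
//   @p0 mov $r0, 2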
2950 class FlatteningPass : public Pass
2951 {
2952 private:
2953 virtual bool visit(Function *);
2954 virtual bool visit(BasicBlock *);
2955
2956 bool tryPredicateConditional(BasicBlock *);
2957 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
2958 void tryPropagateBranch(BasicBlock *);
2959 inline bool isConstantCondition(Value *pred);
2960 inline bool mayPredicate(const Instruction *, const Value *pred) const;
2961 inline void removeFlow(Instruction *);
2962
2963 uint8_t gpr_unit;
2964 };
2965
2966 bool
2967 FlatteningPass::isConstantCondition(Value *pred)
2968 {
2969 Instruction *insn = pred->getUniqueInsn();
2970 assert(insn);
2971 if (insn->op != OP_SET || insn->srcExists(2))
2972 return false;
2973
2974 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
2975 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
2976 DataFile file;
2977 if (ld) {
2978 if (ld->op != OP_MOV && ld->op != OP_LOAD)
2979 return false;
2980 if (ld->src(0).isIndirect(0))
2981 return false;
2982 file = ld->src(0).getFile();
2983 } else {
2984 file = insn->src(s).getFile();
2985 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
2986 // in register "units", which can vary between targets.
2987 if (file == FILE_GPR) {
2988 Value *v = insn->getSrc(s);
2989 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
2990 int units = bytes >> gpr_unit;
2991 if (units > prog->maxGPR)
2992 file = FILE_IMMEDIATE;
2993 }
2994 }
2995 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
2996 return false;
2997 }
2998 return true;
2999 }
3000
3001 void
3002 FlatteningPass::removeFlow(Instruction *insn)
3003 {
3004 FlowInstruction *term = insn ? insn->asFlow() : NULL;
3005 if (!term)
3006 return;
3007 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
3008
3009 if (term->op == OP_BRA) {
3010 // TODO: this might get more difficult when we get arbitrary BRAs
3011 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
3012 return;
3013 } else
3014 if (term->op != OP_JOIN)
3015 return;
3016
3017 Value *pred = term->getPredicate();
3018
3019 delete_Instruction(prog, term);
3020
3021 if (pred && pred->refCount() == 0) {
3022 Instruction *pSet = pred->getUniqueInsn();
3023 pred->join->reg.data.id = -1; // deallocate
3024 if (pSet->isDead())
3025 delete_Instruction(prog, pSet);
3026 }
3027 }
3028
3029 void
3030 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3031 {
3032 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3033 if (i->isNop())
3034 continue;
3035 assert(!i->getPredicate());
3036 i->setPredicate(cc, pred);
3037 }
3038 removeFlow(bb->getExit());
3039 }
3040
3041 bool
3042 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3043 {
3044 if (insn->isPseudo())
3045 return true;
3046 // TODO: calls where we don't know which registers are modified
3047
3048 if (!prog->getTarget()->mayPredicate(insn, pred))
3049 return false;
3050 for (int d = 0; insn->defExists(d); ++d)
3051 if (insn->getDef(d)->equals(pred))
3052 return false;
3053 return true;
3054 }
3055
// If we jump to BRA/JOIN/EXIT, replace the jump with it.
3057 // NOTE: We do not update the CFG anymore here !
3058 //
3059 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3060 // BB:0
3061 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3062 // BB1:
3063 // bra BB:3
3064 // BB2:
3065 // ...
3066 // BB3:
3067 // ...
3068 void
3069 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3070 {
3071 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3072 BasicBlock *bf = i->asFlow()->target.bb;
3073
3074 if (bf->getInsnCount() != 1)
3075 continue;
3076
3077 FlowInstruction *bra = i->asFlow();
3078 FlowInstruction *rep = bf->getExit()->asFlow();
3079
3080 if (!rep || rep->getPredicate())
3081 continue;
3082 if (rep->op != OP_BRA &&
3083 rep->op != OP_JOIN &&
3084 rep->op != OP_EXIT)
3085 continue;
3086
3087 // TODO: If there are multiple branches to @rep, only the first would
3088 // be replaced, so only remove them after this pass is done ?
3089 // Also, need to check all incident blocks for fall-through exits and
3090 // add the branch there.
3091 bra->op = rep->op;
3092 bra->target.bb = rep->target.bb;
3093 if (bf->cfg.incidentCount() == 1)
3094 bf->remove(rep);
3095 }
3096 }
3097
3098 bool
3099 FlatteningPass::visit(Function *fn)
3100 {
3101 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3102
3103 return true;
3104 }
3105
3106 bool
3107 FlatteningPass::visit(BasicBlock *bb)
3108 {
3109 if (tryPredicateConditional(bb))
3110 return true;
3111
3112 // try to attach join to previous instruction
3113 if (prog->getTarget()->hasJoin) {
3114 Instruction *insn = bb->getExit();
3115 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3116 insn = insn->prev;
3117 if (insn && !insn->getPredicate() &&
3118 !insn->asFlow() &&
3119 insn->op != OP_DISCARD &&
3120 insn->op != OP_TEXBAR &&
3121 !isTextureOp(insn->op) && // probably just nve4
3122 !isSurfaceOp(insn->op) && // not confirmed
3123 insn->op != OP_LINTERP && // probably just nve4
3124 insn->op != OP_PINTERP && // probably just nve4
3125 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3126 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3127 !insn->isNop()) {
3128 insn->join = 1;
3129 bb->remove(bb->getExit());
3130 return true;
3131 }
3132 }
3133 }
3134
3135 tryPropagateBranch(bb);
3136
3137 return true;
3138 }
3139
3140 bool
3141 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3142 {
3143 BasicBlock *bL = NULL, *bR = NULL;
3144 unsigned int nL = 0, nR = 0, limit = 12;
3145 Instruction *insn;
3146 unsigned int mask;
3147
3148 mask = bb->initiatesSimpleConditional();
3149 if (!mask)
3150 return false;
3151
3152 assert(bb->getExit());
3153 Value *pred = bb->getExit()->getPredicate();
3154 assert(pred);
3155
3156 if (isConstantCondition(pred))
3157 limit = 4;
3158
3159 Graph::EdgeIterator ei = bb->cfg.outgoing();
3160
3161 if (mask & 1) {
3162 bL = BasicBlock::get(ei.getNode());
3163 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3164 if (!mayPredicate(insn, pred))
3165 return false;
3166 if (nL > limit)
3167 return false; // too long, do a real branch
3168 }
3169 ei.next();
3170
3171 if (mask & 2) {
3172 bR = BasicBlock::get(ei.getNode());
3173 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3174 if (!mayPredicate(insn, pred))
3175 return false;
3176 if (nR > limit)
3177 return false; // too long, do a real branch
3178 }
3179
3180 if (bL)
3181 predicateInstructions(bL, pred, bb->getExit()->cc);
3182 if (bR)
3183 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3184
3185 if (bb->joinAt) {
3186 bb->remove(bb->joinAt);
3187 bb->joinAt = NULL;
3188 }
3189 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3190
3191 // remove potential join operations at the end of the conditional
3192 if (prog->getTarget()->joinAnterior) {
3193 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3194 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3195 removeFlow(bb->getEntry());
3196 }
3197
3198 return true;
3199 }
3200
3201 // =============================================================================
3202
3203 // Fold Immediate into MAD; must be done after register allocation due to
3204 // constraint SDST == SSRC2
3205 // TODO:
3206 // Does NVC0+ have other situations where this pass makes sense?
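//
// e.g. (hypothetical, post-RA, dst and src2 allocated to the same register):
//   mov $r2, 0x40000000
//   mad $r0, $r1, $r2, $r0
// becomes
//   mad $r0, $r1, 0x40000000, $r0
// with the now-dead mov removed here, since there is no post-RA DCE pass.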
3207 class PostRaLoadPropagation : public Pass
3208 {
3209 private:
3210 virtual bool visit(Instruction *);
3211
3212 void handleMADforNV50(Instruction *);
3213 void handleMADforNVC0(Instruction *);
3214 };
3215
3216 static bool
3217 post_ra_dead(Instruction *i)
3218 {
3219 for (int d = 0; i->defExists(d); ++d)
3220 if (i->getDef(d)->refCount())
3221 return false;
3222 return true;
3223 }
3224
3225 // Fold Immediate into MAD; must be done after register allocation due to
3226 // constraint SDST == SSRC2
3227 void
3228 PostRaLoadPropagation::handleMADforNV50(Instruction *i)
3229 {
3230 if (i->def(0).getFile() != FILE_GPR ||
3231 i->src(0).getFile() != FILE_GPR ||
3232 i->src(1).getFile() != FILE_GPR ||
3233 i->src(2).getFile() != FILE_GPR ||
3234 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3235 return;
3236
3237 if (i->getDef(0)->reg.data.id >= 64 ||
3238 i->getSrc(0)->reg.data.id >= 64)
3239 return;
3240
3241 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3242 return;
3243
3244 if (i->getPredicate())
3245 return;
3246
3247 Value *vtmp;
3248 Instruction *def = i->getSrc(1)->getInsn();
3249
3250 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3251 def = def->getSrc(0)->getInsn();
3252 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3253 vtmp = i->getSrc(1);
3254 if (isFloatType(i->sType)) {
3255 i->setSrc(1, def->getSrc(0));
3256 } else {
3257 ImmediateValue val;
3258 bool ret = def->src(0).getImmediate(val);
3259 assert(ret);
3260 if (i->getSrc(1)->reg.data.id & 1)
3261 val.reg.data.u32 >>= 16;
3262 val.reg.data.u32 &= 0xffff;
3263 i->setSrc(1, new_ImmediateValue(prog, val.reg.data.u32));
3264 }
3265
3266 /* There's no post-RA dead code elimination, so do it here
3267 * XXX: if we add more code-removing post-RA passes, we might
3268 * want to create a post-RA dead-code elim pass */
3269 if (post_ra_dead(vtmp->getInsn())) {
3270 Value *src = vtmp->getInsn()->getSrc(0);
3271 // Careful -- splits will have already been removed from the
         // function. Don't double-delete.
3273 if (vtmp->getInsn()->bb)
3274 delete_Instruction(prog, vtmp->getInsn());
3275 if (src->getInsn() && post_ra_dead(src->getInsn()))
3276 delete_Instruction(prog, src->getInsn());
3277 }
3278 }
3279 }
3280
3281 void
3282 PostRaLoadPropagation::handleMADforNVC0(Instruction *i)
3283 {
3284 if (i->def(0).getFile() != FILE_GPR ||
3285 i->src(0).getFile() != FILE_GPR ||
3286 i->src(1).getFile() != FILE_GPR ||
3287 i->src(2).getFile() != FILE_GPR ||
3288 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3289 return;
3290
3291 // TODO: gm107 can also do this for S32, maybe other chipsets as well
3292 if (i->dType != TYPE_F32)
3293 return;
3294
3295 if ((i->src(2).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3296 return;
3297
3298 ImmediateValue val;
3299 int s;
3300
3301 if (i->src(0).getImmediate(val))
3302 s = 1;
3303 else if (i->src(1).getImmediate(val))
3304 s = 0;
3305 else
3306 return;
3307
3308 if ((i->src(s).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3309 return;
3310
3311 if (s == 1)
3312 i->swapSources(0, 1);
3313
3314 Instruction *imm = i->getSrc(1)->getInsn();
3315 i->setSrc(1, imm->getSrc(0));
3316 if (post_ra_dead(imm))
3317 delete_Instruction(prog, imm);
3318 }
3319
3320 bool
3321 PostRaLoadPropagation::visit(Instruction *i)
3322 {
3323 switch (i->op) {
3324 case OP_FMA:
3325 case OP_MAD:
3326 if (prog->getTarget()->getChipset() < 0xc0)
3327 handleMADforNV50(i);
3328 else
3329 handleMADforNVC0(i);
3330 break;
3331 default:
3332 break;
3333 }
3334
3335 return true;
3336 }
3337
3338 // =============================================================================
3339
// Common subexpression elimination. Stupid O(n^2) implementation.
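//
// e.g. two identical computations in one basic block,
//   %a = add f32 %x, %y
//   ...
//   %b = add f32 %x, %y
// are merged: uses of %b are redirected to %a and the second ADD is deleted.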
3341 class LocalCSE : public Pass
3342 {
3343 private:
3344 virtual bool visit(BasicBlock *);
3345
3346 inline bool tryReplace(Instruction **, Instruction *);
3347
3348 DLList ops[OP_LAST + 1];
3349 };
3350
3351 class GlobalCSE : public Pass
3352 {
3353 private:
3354 virtual bool visit(BasicBlock *);
3355 };
3356
3357 bool
3358 Instruction::isActionEqual(const Instruction *that) const
3359 {
3360 if (this->op != that->op ||
3361 this->dType != that->dType ||
3362 this->sType != that->sType)
3363 return false;
3364 if (this->cc != that->cc)
3365 return false;
3366
3367 if (this->asTex()) {
3368 if (memcmp(&this->asTex()->tex,
3369 &that->asTex()->tex,
3370 sizeof(this->asTex()->tex)))
3371 return false;
3372 } else
3373 if (this->asCmp()) {
3374 if (this->asCmp()->setCond != that->asCmp()->setCond)
3375 return false;
3376 } else
3377 if (this->asFlow()) {
3378 return false;
3379 } else {
3380 if (this->ipa != that->ipa ||
3381 this->lanes != that->lanes ||
3382 this->perPatch != that->perPatch)
3383 return false;
3384 if (this->postFactor != that->postFactor)
3385 return false;
3386 }
3387
3388 if (this->subOp != that->subOp ||
3389 this->saturate != that->saturate ||
3390 this->rnd != that->rnd ||
3391 this->ftz != that->ftz ||
3392 this->dnz != that->dnz ||
3393 this->cache != that->cache ||
3394 this->mask != that->mask)
3395 return false;
3396
3397 return true;
3398 }
3399
3400 bool
3401 Instruction::isResultEqual(const Instruction *that) const
3402 {
3403 unsigned int d, s;
3404
3405 // NOTE: location of discard only affects tex with liveOnly and quadops
3406 if (!this->defExists(0) && this->op != OP_DISCARD)
3407 return false;
3408
3409 if (!isActionEqual(that))
3410 return false;
3411
3412 if (this->predSrc != that->predSrc)
3413 return false;
3414
3415 for (d = 0; this->defExists(d); ++d) {
3416 if (!that->defExists(d) ||
3417 !this->getDef(d)->equals(that->getDef(d), false))
3418 return false;
3419 }
3420 if (that->defExists(d))
3421 return false;
3422
3423 for (s = 0; this->srcExists(s); ++s) {
3424 if (!that->srcExists(s))
3425 return false;
3426 if (this->src(s).mod != that->src(s).mod)
3427 return false;
3428 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3429 return false;
3430 }
3431 if (that->srcExists(s))
3432 return false;
3433
3434 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3435 switch (src(0).getFile()) {
3436 case FILE_MEMORY_CONST:
3437 case FILE_SHADER_INPUT:
3438 return true;
3439 case FILE_SHADER_OUTPUT:
3440 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3441 default:
3442 return false;
3443 }
3444 }
3445
3446 return true;
3447 }
3448
// pull through common expressions from the different incoming blocks
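//
// e.g. if every predecessor computes the same value feeding a phi,
//   BB1: %a = add f32 %x, %y     BB2: %b = add f32 %x, %y
//   BB3: %p = phi %a, %b
// one copy of the computation is pulled into the merge block instead:
//   BB3: %p = add f32 %x, %y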
3450 bool
3451 GlobalCSE::visit(BasicBlock *bb)
3452 {
3453 Instruction *phi, *next, *ik;
3454 int s;
3455
3456 // TODO: maybe do this with OP_UNION, too
3457
3458 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3459 next = phi->next;
3460 if (phi->getSrc(0)->refCount() > 1)
3461 continue;
3462 ik = phi->getSrc(0)->getInsn();
3463 if (!ik)
3464 continue; // probably a function input
3465 if (ik->defCount(0xff) > 1)
3466 continue; // too painful to check if we can really push this forward
3467 for (s = 1; phi->srcExists(s); ++s) {
3468 if (phi->getSrc(s)->refCount() > 1)
3469 break;
3470 if (!phi->getSrc(s)->getInsn() ||
3471 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3472 break;
3473 }
3474 if (!phi->srcExists(s)) {
3475 Instruction *entry = bb->getEntry();
3476 ik->bb->remove(ik);
3477 if (!entry || entry->op != OP_JOIN)
3478 bb->insertHead(ik);
3479 else
3480 bb->insertAfter(entry, ik);
3481 ik->setDef(0, phi->getDef(0));
3482 delete_Instruction(prog, phi);
3483 }
3484 }
3485
3486 return true;
3487 }
3488
3489 bool
3490 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3491 {
3492 Instruction *old = *ptr;
3493
3494 // TODO: maybe relax this later (causes trouble with OP_UNION)
3495 if (i->isPredicated())
3496 return false;
3497
3498 if (!old->isResultEqual(i))
3499 return false;
3500
3501 for (int d = 0; old->defExists(d); ++d)
3502 old->def(d).replace(i->getDef(d), false);
3503 delete_Instruction(prog, old);
3504 *ptr = NULL;
3505 return true;
3506 }
3507
3508 bool
3509 LocalCSE::visit(BasicBlock *bb)
3510 {
3511 unsigned int replaced;
3512
3513 do {
3514 Instruction *ir, *next;
3515
3516 replaced = 0;
3517
3518 // will need to know the order of instructions
3519 int serial = 0;
3520 for (ir = bb->getFirst(); ir; ir = ir->next)
3521 ir->serial = serial++;
3522
3523 for (ir = bb->getFirst(); ir; ir = next) {
3524 int s;
3525 Value *src = NULL;
3526
3527 next = ir->next;
3528
3529 if (ir->fixed) {
3530 ops[ir->op].insert(ir);
3531 continue;
3532 }
3533
3534 for (s = 0; ir->srcExists(s); ++s)
3535 if (ir->getSrc(s)->asLValue())
3536 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3537 src = ir->getSrc(s);
3538
3539 if (src) {
3540 for (Value::UseIterator it = src->uses.begin();
3541 it != src->uses.end(); ++it) {
3542 Instruction *ik = (*it)->getInsn();
3543 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3544 if (tryReplace(&ir, ik))
3545 break;
3546 }
3547 } else {
3548 DLLIST_FOR_EACH(&ops[ir->op], iter)
3549 {
3550 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3551 if (tryReplace(&ir, ik))
3552 break;
3553 }
3554 }
3555
3556 if (ir)
3557 ops[ir->op].insert(ir);
3558 else
3559 ++replaced;
3560 }
3561 for (unsigned int i = 0; i <= OP_LAST; ++i)
3562 ops[i].clear();
3563
3564 } while (replaced);
3565
3566 return true;
3567 }
3568
3569 // =============================================================================
3570
3571 // Remove computations of unused values.
3572 class DeadCodeElim : public Pass
3573 {
3574 public:
3575 bool buryAll(Program *);
3576
3577 private:
3578 virtual bool visit(BasicBlock *);
3579
3580 void checkSplitLoad(Instruction *ld); // for partially dead loads
3581
3582 unsigned int deadCount;
3583 };
3584
3585 bool
3586 DeadCodeElim::buryAll(Program *prog)
3587 {
3588 do {
3589 deadCount = 0;
3590 if (!this->run(prog, false, false))
3591 return false;
3592 } while (deadCount);
3593
3594 return true;
3595 }
3596
3597 bool
3598 DeadCodeElim::visit(BasicBlock *bb)
3599 {
3600 Instruction *prev;
3601
3602 for (Instruction *i = bb->getExit(); i; i = prev) {
3603 prev = i->prev;
3604 if (i->isDead()) {
3605 ++deadCount;
3606 delete_Instruction(prog, i);
3607 } else
3608 if (i->defExists(1) &&
3609 i->subOp == 0 &&
3610 (i->op == OP_VFETCH || i->op == OP_LOAD)) {
3611 checkSplitLoad(i);
3612 } else
3613 if (i->defExists(0) && !i->getDef(0)->refCount()) {
3614 if (i->op == OP_ATOM ||
3615 i->op == OP_SUREDP ||
3616 i->op == OP_SUREDB) {
3617 i->setDef(0, NULL);
3618 if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
3619 i->cache = CACHE_CV;
3620 i->op = OP_STORE;
3621 i->subOp = 0;
3622 }
3623 } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
3624 i->setDef(0, i->getDef(1));
3625 i->setDef(1, NULL);
3626 }
3627 }
3628 }
3629 return true;
3630 }
3631
3632 // Each load can go into up to 4 destinations, any of which might potentially
3633 // be dead (i.e. a hole). These can always be split into 2 loads, independent
3634 // of where the holes are. We find the first contiguous region, put it into
3635 // the first load, and then put the second contiguous region into the second
3636 // load. There can be at most 2 contiguous regions.
3637 //
3638 // Note that there are some restrictions, for example it's not possible to do
3639 // a 64-bit load that's not 64-bit aligned, so such a load has to be split
3640 // up. Also hardware doesn't support 96-bit loads, so those also have to be
3641 // split into a 64-bit and 32-bit load.
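//
// e.g. a 128-bit load whose third component is dead (hypothetical notation):
//   %a, %b, (dead), %d = ld u128 l[0x0]
// is split into
//   %a, %b = ld u64 l[0x0]
//   %d = ld u32 l[0xc]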
3642 void
3643 DeadCodeElim::checkSplitLoad(Instruction *ld1)
3644 {
3645 Instruction *ld2 = NULL; // can get at most 2 loads
3646 Value *def1[4];
3647 Value *def2[4];
3648 int32_t addr1, addr2;
3649 int32_t size1, size2;
3650 int d, n1, n2;
3651 uint32_t mask = 0xffffffff;
3652
3653 for (d = 0; ld1->defExists(d); ++d)
3654 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
3655 mask &= ~(1 << d);
3656 if (mask == 0xffffffff)
3657 return;
3658
3659 addr1 = ld1->getSrc(0)->reg.data.offset;
3660 n1 = n2 = 0;
3661 size1 = size2 = 0;
3662
3663 // Compute address/width for first load
3664 for (d = 0; ld1->defExists(d); ++d) {
3665 if (mask & (1 << d)) {
3666 if (size1 && (addr1 & 0x7))
3667 break;
3668 def1[n1] = ld1->getDef(d);
3669 size1 += def1[n1++]->reg.size;
3670 } else
3671 if (!n1) {
3672 addr1 += ld1->getDef(d)->reg.size;
3673 } else {
3674 break;
3675 }
3676 }
3677
3678 // Scale back the size of the first load until it can be loaded. This
3679 // typically happens for TYPE_B96 loads.
3680 while (n1 &&
3681 !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
3682 typeOfSize(size1))) {
3683 size1 -= def1[--n1]->reg.size;
3684 d--;
3685 }
3686
3687 // Compute address/width for second load
3688 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
3689 if (mask & (1 << d)) {
3690 assert(!size2 || !(addr2 & 0x7));
3691 def2[n2] = ld1->getDef(d);
3692 size2 += def2[n2++]->reg.size;
3693 } else if (!n2) {
3694 assert(!n2);
3695 addr2 += ld1->getDef(d)->reg.size;
3696 } else {
3697 break;
3698 }
3699 }
3700
3701 // Make sure that we've processed all the values
3702 for (; ld1->defExists(d); ++d)
3703 assert(!(mask & (1 << d)));
3704
3705 updateLdStOffset(ld1, addr1, func);
3706 ld1->setType(typeOfSize(size1));
3707 for (d = 0; d < 4; ++d)
3708 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
3709
3710 if (!n2)
3711 return;
3712
3713 ld2 = cloneShallow(func, ld1);
3714 updateLdStOffset(ld2, addr2, func);
3715 ld2->setType(typeOfSize(size2));
3716 for (d = 0; d < 4; ++d)
3717 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
3718
3719 ld1->bb->insertAfter(ld1, ld2);
3720 }
3721
3722 // =============================================================================
3723
3724 #define RUN_PASS(l, n, f) \
3725 if (level >= (l)) { \
3726 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
3727 INFO("PEEPHOLE: %s\n", #n); \
3728 n pass; \
3729 if (!pass.f(this)) \
3730 return false; \
3731 }
3732
3733 bool
3734 Program::optimizeSSA(int level)
3735 {
3736 RUN_PASS(1, DeadCodeElim, buryAll);
3737 RUN_PASS(1, CopyPropagation, run);
3738 RUN_PASS(1, MergeSplits, run);
3739 RUN_PASS(2, GlobalCSE, run);
3740 RUN_PASS(1, LocalCSE, run);
3741 RUN_PASS(2, AlgebraicOpt, run);
3742 RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
3743 RUN_PASS(1, ConstantFolding, foldAll);
3744 RUN_PASS(2, LateAlgebraicOpt, run);
3745 RUN_PASS(1, Split64BitOpPreRA, run);
3746 RUN_PASS(1, LoadPropagation, run);
3747 RUN_PASS(1, IndirectPropagation, run);
3748 RUN_PASS(2, MemoryOpt, run);
3749 RUN_PASS(2, LocalCSE, run);
3750 RUN_PASS(0, DeadCodeElim, buryAll);
3751
3752 return true;
3753 }
3754
3755 bool
3756 Program::optimizePostRA(int level)
3757 {
3758 RUN_PASS(2, FlatteningPass, run);
3759 RUN_PASS(2, PostRaLoadPropagation, run);
3760
3761 return true;
3762 }
3763
3764 }