/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
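// Illustrative example (register names made up):
//   mov $r1 $r0 ; add $r2 $r1 $r3   ->   add $r2 $r0 $r3
// with the mov deleted once its def is no longer referenced.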
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
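// Illustrative example for a 64-bit value (made-up register names):
//   split $r0, $r1 <- $r4d ; merge $r6d <- $r0, $r1
// lets all uses of $r6d refer to $r4d directly, eliding the round trip.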
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative) {
      if (insn->op != OP_SET && insn->op != OP_SLCT &&
          insn->op != OP_SUB && insn->op != OP_XMAD)
         return;
      // XMAD is only commutative if neither the CBCC nor the MRG flag is set.
      if (insn->op == OP_XMAD &&
          (insn->subOp & NV50_IR_SUBOP_XMAD_CMODE_MASK) == NV50_IR_SUBOP_XMAD_CBCC)
         return;
      if (insn->op == OP_XMAD && (insn->subOp & NV50_IR_SUBOP_XMAD_MRG))
         return;
   }
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alphatesting; we can't reverse its
   // arguments as that would confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way, with
   // luck, the instruction defining it can eventually be removed entirely.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   } else
   if (insn->op == OP_XMAD) {
      // swap h1 flags
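      // NV50_IR_SUBOP_XMAD_H1(0) and H1(1) occupy adjacent bits and select
      // the high 16-bit half of src0/src1; shifting subOp one bit each way
      // and masking exchanges the two flags to follow the swapped sources.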
      uint16_t h1 = (insn->subOp >> 1 & NV50_IR_SUBOP_XMAD_H1(0)) |
                    (insn->subOp << 1 & NV50_IR_SUBOP_XMAD_H1(1));
      insn->subOp = (insn->subOp & ~NV50_IR_SUBOP_XMAD_H1_MASK) | h1;
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   BuildUtil bld;
};

bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      bld.setPosition(i, false);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SHLADD) {
            if (!insn->src(2).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, bld.mkOp2v(
               OP_SHL, TYPE_U32, bld.getSSA(), insn->getSrc(0), insn->getSrc(1)));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
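// For example, "add u32 $r2 0x4 0x8" becomes "mov u32 $r2 0xc". foldAll()
// iterates because one fold can expose immediates that enable another.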
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   /* true if i was deleted */
   bool opnd(Instruction *i, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   bool createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2)) {
         expr(i, src0, src1, src2);
      } else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1)) {
         expr(i, src0, src1);
      } else
      if (i->srcExists(0) && i->src(0).getImmediate(src0)) {
         if (opnd(i, src0, 0))
            continue;
      } else
      if (i->srcExists(1) && i->src(1).getImmediate(src1)) {
         if (opnd(i, src1, 1))
            continue;
      }
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn)
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
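      // (x << lshift) >> rshift isolates the field: the left shift drops the
      // bits above it, the right shift moves it down to bit 0 and, for
      // TYPE_S32, sign-extends it. When the field already reaches bit 31, a
      // plain right shift by the offset has the same effect.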
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. However it might not be anyway, as there may be other
      // restrictions, so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->dnz = 0;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV;
      if (i->saturate)
         unary(i, *i->getSrc(0)->asImm());
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

bool
ConstantFolding::createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c)
{
   const Target *target = prog->getTarget();
   int64_t absB = llabs(b);

   //a * (2^shl) -> a << shl
   if (b >= 0 && util_is_power_of_two_or_zero64(b)) {
      int shl = util_logbase2_64(b);

      Value *res = c ? bld.getSSA(typeSizeof(ty)) : def;
      bld.mkOp2(OP_SHL, ty, res, a, bld.mkImm(shl));
      if (c)
         bld.mkOp2(OP_ADD, ty, def, res, c);

      return true;
   }

   //a * (2^shl + 1) -> a << shl + a
   //a * -(2^shl + 1) -> -a << shl + a
   //a * (2^shl - 1) -> a << shl - a
   //a * -(2^shl - 1) -> -a << shl - a
   if (typeSizeof(ty) == 4 &&
       (util_is_power_of_two_or_zero64(absB - 1) ||
        util_is_power_of_two_or_zero64(absB + 1)) &&
       target->isOpSupported(OP_SHLADD, TYPE_U32)) {
      bool subA = util_is_power_of_two_or_zero64(absB + 1);
      int shl = subA ? util_logbase2_64(absB + 1) : util_logbase2_64(absB - 1);

      Value *res = c ? bld.getSSA() : def;
      Instruction *insn = bld.mkOp3(OP_SHLADD, TYPE_U32, res, a, bld.mkImm(shl), a);
      if (b < 0)
         insn->src(0).mod = Modifier(NV50_IR_MOD_NEG);
      if (subA)
         insn->src(2).mod = Modifier(NV50_IR_MOD_NEG);

      if (c)
         bld.mkOp2(OP_ADD, TYPE_U32, def, res, c);

      return true;
   }

   if (typeSizeof(ty) == 4 && b >= 0 && b <= 0xffff &&
       target->isOpSupported(OP_XMAD, TYPE_U32)) {
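      // Roughly, XMAD forms a 16x16-bit product plus a 32-bit addend; PSL
      // shifts the product left by 16 and H1(0) takes the high half of src0.
      // Since b fits in 16 bits, a * b + c decomposes into two XMADs:
      //   tmp = lo16(a) * b + (c ? c : 0)
      //   def = ((hi16(a) * b) << 16) + tmp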
      Value *tmp = bld.mkOp3v(OP_XMAD, TYPE_U32, bld.getSSA(),
                              a, bld.mkImm((uint32_t)b), c ? c : bld.mkImm(0));
      bld.mkOp3(OP_XMAD, TYPE_U32, def, a, bld.mkImm((uint32_t)b), tmp)->subOp =
         NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_H1(0);

      return true;
   }

   return false;
}

bool
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;
   bool deleted = false;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size;
      uint8_t bitsize = size * 8;
      uint32_t mask = (1ULL << bitsize) - 1;
      assert(bitsize <= 32);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= bitsize;
      }
      delete_Instruction(prog, i);
      deleted = true;
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32 && !i->precise)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // mul_high(a, 1) for signed a yields the sign of a, which is
            // computed with a compare against zero.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
            deleted = true;
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
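            // mul_high(a, 2^n) == a >> (32 - n); OP_SHR follows dType, so the
            // shift is arithmetic or logical to match the signedness.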
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->dnz = 0;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->dType) && !i->src(t).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, NULL)) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_FMA:
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->dnz = 0;
         i->op = OP_ADD;
      } else
      if (!isFloatType(i->dType) && !i->subOp && !i->src(t).mod && !i->src(2).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, i->getSrc(2))) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      /* fallthrough */
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
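         // Magic-number division by an invariant u32 d, in the round-up
         // variant (cf. Hacker's Delight):
         //   l = ceil(log2(d)),  m = floor(2^32 * (2^l - d) / d) + 1
         //   hi = mul_high(a, m)
         //   q  = (hi + ((a - hi) >> 1)) >> (l - 1)
         // The subtract/shift fixup keeps the multiplier within 32 bits.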
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
         deleted = true;
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
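         // Signed analogue of the magic-number division above:
         //   m = 2^(32+l-1) / |d| + 1 - 2^32, with l = max(ceil(log2(|d|)), 1)
         // mad_high(a, m, a) >> (l-1) gives the quotient rounded toward
         // negative infinity; subtracting (a < 0) (the SET below yields 0 or
         // -1) corrects that to truncation, and d < 0 finally negates.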
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_MOD:
      if (s == 1 && imm0.isPow2()) {
         bld.setPosition(i, false);
         if (i->sType == TYPE_U32) {
            i->op = OP_AND;
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
         } else if (i->sType == TYPE_S32) {
            // Do it on the absolute value of the input, and then restore the
            // sign. The only odd case is MIN_INT, but that should work out
            // as well, since MIN_INT mod any power of 2 is 0.
            //
            // Technically we don't have to do any of this since MOD is
            // undefined with negative arguments in GLSL, but this seems like
            // the nice thing to do.
            Value *abs = bld.mkOp1v(OP_ABS, TYPE_S32, bld.getSSA(), i->getSrc(0));
            Value *neg, *v1, *v2;
            bld.mkCmp(OP_SET, CC_LT, TYPE_S32,
                      (neg = bld.getSSA(1, prog->getTarget()->nativeFile(FILE_PREDICATE))),
                      TYPE_S32, i->getSrc(0), bld.loadImm(NULL, 0));
            Value *mod = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), abs,
                                    bld.loadImm(NULL, imm0.reg.data.u32 - 1));
            bld.mkOp1(OP_NEG, TYPE_S32, (v1 = bld.getSSA()), mod)
               ->setPredicate(CC_P, neg);
            bld.mkOp1(OP_MOV, TYPE_S32, (v2 = bld.getSSA()), mod)
               ->setPredicate(CC_NOT_P, neg);
            newi = bld.mkOp2(OP_UNION, TYPE_S32, i->getDef(0), v1, v2);

            delete_Instruction(prog, i);
            deleted = true;
         }
      } else if (s == 1) {
         // In this case, we still want the optimized lowering that we get
         // from having division by an immediate.
         //
         // a % b == a - (a/b) * b
         bld.setPosition(i, false);
         Value *div = bld.mkOp2v(OP_DIV, i->sType, bld.getSSA(),
                                 i->getSrc(0), i->getSrc(1));
         newi = bld.mkOp2(OP_ADD, i->sType, i->getDef(0), i->getSrc(0),
                          bld.mkOp2v(OP_MUL, i->sType, bld.getSSA(), div, i->getSrc(1)));
         // TODO: Check that target supports this. In this case, we know that
         // all backends do.
         newi->src(1).mod = Modifier(NV50_IR_MOD_NEG);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
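      // Illustrative example: for "$r0 = set lt a, b; $p0 = set ne $r0, 0"
      // the second set can test the original condition directly:
      // "$p0 = set lt a, b".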
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return false;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0; reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return false;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return false;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return false;
         if (imm0.reg.data.f32 != 1.0)
            return false;
         if (cmp->dType != TYPE_U32)
            return false;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      } else if (src->op == OP_SHL &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(~imm0.reg.data.u32 + 1) &&
                 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return false;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return false;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return false;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return false;
         if (si->src(!adds).mod != Modifier(0))
            return false;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b)
         // propagate the add bit into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return false;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return false;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return false;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return false;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return false; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return false;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return false;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return false;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return false;
   }
   // This can get left behind by some of the optimizations which simplify
   // saturatable values.
   if (newi->op == OP_MOV && newi->saturate) {
      ImmediateValue tmp;
      newi->saturate = 0;
      newi->op = OP_SAT;
      if (newi->src(0).getImmediate(tmp))
         unary(newi, tmp);
   }

   if (newi->op != op)
      foldCount++;
   return deleted;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
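// Illustrative example:
//   neg f32 $r1 $r0 ; add f32 $r2 $r1 $r3   ->   add f32 $r2 -$r0 $r3
// provided the target supports a NEG modifier on that source of the ADD.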
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
// EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);
   void handleEXTBF_RDSV(Instruction *);

   BuildUtil bld;
};
void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet; if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->op = OP_SAD;
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
}
1822
1823 bool
1824 AlgebraicOpt::handleADD(Instruction *add)
1825 {
1826 Value *src0 = add->getSrc(0);
1827 Value *src1 = add->getSrc(1);
1828
1829 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1830 return false;
1831
1832 bool changed = false;
1833 // we can't optimize to MAD if the add is precise
1834 if (!add->precise && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1835 changed = tryADDToMADOrSAD(add, OP_MAD);
1836 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1837 changed = tryADDToMADOrSAD(add, OP_SAD);
1838 return changed;
1839 }
1840
1841 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1842 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1843 bool
1844 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1845 {
1846 Value *src0 = add->getSrc(0);
1847 Value *src1 = add->getSrc(1);
1848 Value *src;
1849 int s;
1850 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1851 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1852 Modifier mod[4];
1853
1854 if (src0->refCount() == 1 &&
1855 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1856 s = 0;
1857 else
1858 if (src1->refCount() == 1 &&
1859 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1860 s = 1;
1861 else
1862 return false;
1863
1864 src = add->getSrc(s);
1865
1866 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1867 return false;
1868
1869 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1870 src->getInsn()->dnz || src->getInsn()->precise)
1871 return false;
1872
1873 if (toOp == OP_SAD) {
1874 ImmediateValue imm;
1875 if (!src->getInsn()->src(2).getImmediate(imm))
1876 return false;
1877 if (!imm.isInteger(0))
1878 return false;
1879 }
1880
1881 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1882 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1883 return false;
1884
1885 mod[0] = add->src(0).mod;
1886 mod[1] = add->src(1).mod;
1887 mod[2] = src->getUniqueInsn()->src(0).mod;
1888 mod[3] = src->getUniqueInsn()->src(1).mod;
1889
1890 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1891 return false;
1892
1893 add->op = toOp;
1894 add->subOp = src->getInsn()->subOp; // potentially mul-high
1895 add->dnz = src->getInsn()->dnz;
1896 add->dType = src->getInsn()->dType; // sign matters for imad hi
1897 add->sType = src->getInsn()->sType;
1898
1899 add->setSrc(2, add->src(s ? 0 : 1));
1900
1901 add->setSrc(0, src->getInsn()->getSrc(0));
1902 add->src(0).mod = mod[2] ^ mod[s];
1903 add->setSrc(1, src->getInsn()->getSrc(1));
1904 add->src(1).mod = mod[3];
1905
1906 return true;
1907 }
1908
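// MIN/MAX(a, a) with matching source modifiers is just the (modified)
// source: fold it away, or turn it into a CVT that applies the modifier.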
1909 void
1910 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1911 {
1912 Value *src0 = minmax->getSrc(0);
1913 Value *src1 = minmax->getSrc(1);
1914
1915 if (src0 != src1 || src0->reg.file != FILE_GPR)
1916 return;
1917 if (minmax->src(0).mod == minmax->src(1).mod) {
1918 if (minmax->def(0).mayReplace(minmax->src(0))) {
1919 minmax->def(0).replace(minmax->src(0), false);
1920 delete_Instruction(prog, minmax);
1921 } else {
1922 minmax->op = OP_CVT;
1923 minmax->setSrc(1, NULL);
1924 }
1925 } else {
1926 // TODO:
1927 // min(x, -x) = -abs(x)
1928 // min(x, -abs(x)) = -abs(x)
1929 // min(x, abs(x)) = x
1930 // max(x, -abs(x)) = x
1931 // max(x, abs(x)) = abs(x)
1932 // max(x, -x) = abs(x)
1933 }
1934 }
1935
1936 // rcp(rcp(a)) = a
1937 // rcp(sqrt(a)) = rsq(a)
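// Source modifiers compose, e.g. rcp(-rcp(a)) reduces to a plain negation
// of a; the combined modifier picks the resulting op below.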
1938 void
1939 AlgebraicOpt::handleRCP(Instruction *rcp)
1940 {
1941 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1942
1943 if (!si)
1944 return;
1945
1946 if (si->op == OP_RCP) {
1947 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1948 rcp->op = mod.getOp();
1949 rcp->setSrc(0, si->getSrc(0));
1950 } else if (si->op == OP_SQRT) {
1951 rcp->op = OP_RSQ;
1952 rcp->setSrc(0, si->getSrc(0));
1953 rcp->src(0).mod = rcp->src(0).mod * si->src(0).mod;
1954 }
1955 }
1956
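// SLCT with an immediate condition can be decided at compile time, and
// SLCT(a, a, c) is independent of the condition; both reduce to a MOV.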
1957 void
1958 AlgebraicOpt::handleSLCT(Instruction *slct)
1959 {
1960 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1961 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1962 slct->setSrc(0, slct->getSrc(1));
1963 } else
1964 if (slct->getSrc(0) != slct->getSrc(1)) {
1965 return;
1966 }
1967 slct->op = OP_MOV;
1968 slct->setSrc(1, NULL);
1969 slct->setSrc(2, NULL);
1970 }
1971
1972 void
1973 AlgebraicOpt::handleLOGOP(Instruction *logop)
1974 {
1975 Value *src0 = logop->getSrc(0);
1976 Value *src1 = logop->getSrc(1);
1977
1978 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1979 return;
1980
1981 if (src0 == src1) {
1982 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1983 logop->def(0).mayReplace(logop->src(0))) {
1984 logop->def(0).replace(logop->src(0), false);
1985 delete_Instruction(prog, logop);
1986 }
1987 } else {
1988 // try AND(SET, SET) -> SET_AND(SET)
1989 Instruction *set0 = src0->getInsn();
1990 Instruction *set1 = src1->getInsn();
1991
1992 if (!set0 || set0->fixed || !set1 || set1->fixed)
1993 return;
1994 if (set1->op != OP_SET) {
1995 Instruction *xchg = set0;
1996 set0 = set1;
1997 set1 = xchg;
1998 if (set1->op != OP_SET)
1999 return;
2000 }
2001 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
2002 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
2003 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
2004 return;
2005 if (set0->op != OP_SET &&
2006 set0->op != OP_SET_AND &&
2007 set0->op != OP_SET_OR &&
2008 set0->op != OP_SET_XOR)
2009 return;
2010 if (set0->getDef(0)->refCount() > 1 &&
2011 set1->getDef(0)->refCount() > 1)
2012 return;
2013 if (set0->getPredicate() || set1->getPredicate())
2014 return;
2015 // check that they don't source each other
2016 for (int s = 0; s < 2; ++s)
2017 if (set0->getSrc(s) == set1->getDef(0) ||
2018 set1->getSrc(s) == set0->getDef(0))
2019 return;
2020
2021 set0 = cloneForward(func, set0);
2022 set1 = cloneShallow(func, set1);
2023 logop->bb->insertAfter(logop, set1);
2024 logop->bb->insertAfter(logop, set0);
2025
2026 set0->dType = TYPE_U8;
2027 set0->getDef(0)->reg.file = FILE_PREDICATE;
2028 set0->getDef(0)->reg.size = 1;
2029 set1->setSrc(2, set0->getDef(0));
2030 set1->op = redOp;
2031 set1->setDef(0, logop->getDef(0));
2032 delete_Instruction(prog, logop);
2033 }
2034 }
2035
2036 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
2037 // nv50:
2038 // F2I(NEG(I2F(ABS(SET))))
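// A boolean SET with dType U32 already writes -1/0, so the NEG + F2I pair
// (and the extra I2F/ABS detour on nv50) folds into re-typing the SET.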
2039 void
2040 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
2041 {
2042 Instruction *insn = cvt->getSrc(0)->getInsn();
2043 if (cvt->sType != TYPE_F32 ||
2044 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
2045 return;
2046 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
2047 return;
2048 if (insn->src(0).mod != Modifier(0))
2049 return;
2050 insn = insn->getSrc(0)->getInsn();
2051
2052 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
2053 if (insn && insn->op == OP_CVT &&
2054 insn->dType == TYPE_F32 &&
2055 insn->sType == TYPE_S32) {
2056 insn = insn->getSrc(0)->getInsn();
2057 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
2058 insn->src(0).mod)
2059 return;
2060 insn = insn->getSrc(0)->getInsn();
2061 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
2062 return;
2063 } else
2064 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
2065 return;
2066 }
2067
2068 Instruction *bset = cloneShallow(func, insn);
2069 bset->dType = TYPE_U32;
2070 bset->setDef(0, cvt->getDef(0));
2071 cvt->bb->insertAfter(cvt, bset);
2072 delete_Instruction(prog, cvt);
2073 }
2074
2075 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
2076 // does a type conversion, this becomes trickier as there might be range
2077 // changes/etc. We could handle those in theory as long as the range was being
2078 // reduced or kept the same.
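// Illustrative example: TRUNC f32 %t a ; CVT s32 $d %t becomes a single
// CVT s32 $d a with the rounding mode forced to ROUND_ZI.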
2079 void
2080 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
2081 {
2082 Instruction *insn = cvt->getSrc(0)->getInsn();
2083
2084 if (!insn ||
2085 insn->saturate ||
2086 insn->subOp ||
2087 insn->dType != insn->sType ||
2088 insn->dType != cvt->sType)
2089 return;
2090
2091 RoundMode rnd = insn->rnd;
2092 switch (insn->op) {
2093 case OP_CEIL:
2094 rnd = ROUND_PI;
2095 break;
2096 case OP_FLOOR:
2097 rnd = ROUND_MI;
2098 break;
2099 case OP_TRUNC:
2100 rnd = ROUND_ZI;
2101 break;
2102 case OP_CVT:
2103 break;
2104 default:
2105 return;
2106 }
2107
2108 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
2109 rnd = (RoundMode)(rnd & 3);
2110
2111 cvt->rnd = rnd;
2112 cvt->setSrc(0, insn->getSrc(0));
2113 cvt->src(0).mod *= insn->src(0).mod;
2114 cvt->sType = insn->sType;
2115 }
2116
2117 // Some shaders extract packed bytes out of words and convert them to
2118 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
2119 // nv50 for word sizes.
2120 //
2121 // CVT(EXTBF(x, byte/word))
2122 // CVT(AND(bytemask, x))
2123 // CVT(AND(bytemask, SHR(x, 8/16/24)))
2124 // CVT(SHR(x, 16/24))
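// Illustrative example (pseudo-IR, extracting byte 1 of a word):
//   and u32 %t 0xff (shr u32 a 8) ; cvt f32 $d %t
// -> cvt f32 $d u8 a with subOp = 1 (the byte index).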
2125 void
2126 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
2127 {
2128 Instruction *insn = cvt->getSrc(0)->getInsn();
2129 ImmediateValue imm;
2130 Value *arg = NULL;
2131 unsigned width, offset;
2132 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
2133 return;
2134 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
2135 width = (imm.reg.data.u32 >> 8) & 0xff;
2136 offset = imm.reg.data.u32 & 0xff;
2137 arg = insn->getSrc(0);
2138
2139 if (width != 8 && width != 16)
2140 return;
2141 if (width == 8 && offset & 0x7)
2142 return;
2143 if (width == 16 && offset & 0xf)
2144 return;
2145 } else if (insn->op == OP_AND) {
2146 int s;
2147 if (insn->src(0).getImmediate(imm))
2148 s = 0;
2149 else if (insn->src(1).getImmediate(imm))
2150 s = 1;
2151 else
2152 return;
2153
2154 if (imm.reg.data.u32 == 0xff)
2155 width = 8;
2156 else if (imm.reg.data.u32 == 0xffff)
2157 width = 16;
2158 else
2159 return;
2160
2161 arg = insn->getSrc(!s);
2162 Instruction *shift = arg->getInsn();
2163 offset = 0;
2164 if (shift && shift->op == OP_SHR &&
2165 shift->sType == cvt->sType &&
2166 shift->src(1).getImmediate(imm) &&
2167 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2168 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2169 arg = shift->getSrc(0);
2170 offset = imm.reg.data.u32;
2171 }
2172 // We just AND'd the high bits away, which means this is effectively an
2173 // unsigned value.
2174 cvt->sType = TYPE_U32;
2175 } else if (insn->op == OP_SHR &&
2176 insn->sType == cvt->sType &&
2177 insn->src(1).getImmediate(imm)) {
2178 arg = insn->getSrc(0);
2179 if (imm.reg.data.u32 == 24) {
2180 width = 8;
2181 offset = 24;
2182 } else if (imm.reg.data.u32 == 16) {
2183 width = 16;
2184 offset = 16;
2185 } else {
2186 return;
2187 }
2188 }
2189
2190 if (!arg)
2191 return;
2192
2193 // Irrespective of what came earlier, we can undo a shift on the argument
2194 // by adjusting the offset.
2195 Instruction *shift = arg->getInsn();
2196 if (shift && shift->op == OP_SHL &&
2197 shift->src(1).getImmediate(imm) &&
2198 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2199 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2200 imm.reg.data.u32 <= offset) {
2201 arg = shift->getSrc(0);
2202 offset -= imm.reg.data.u32;
2203 }
2204
2205 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2206 // annoying to detect them.
2207
2208 if (width == 8) {
2209 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2210 } else {
2211 assert(width == 16);
2212 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2213 }
2214 cvt->setSrc(0, arg);
2215 cvt->subOp = offset >> 3;
2216 }
2217
2218 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
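// "fits s6" means the combined immediate must be a signed 6-bit value,
// i.e. in [-32, 31]; both constants are summed and range-checked below.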
2219 void
2220 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2221 {
2222 ImmediateValue imm;
2223 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2224 int s;
2225 Instruction *add;
2226
2227 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2228
2229 // look for ADD (TODO: only count references by non-SUCLAMP)
2230 if (insn->getSrc(0)->refCount() > 1)
2231 return;
2232 add = insn->getSrc(0)->getInsn();
2233 if (!add || add->op != OP_ADD ||
2234 (add->dType != TYPE_U32 &&
2235 add->dType != TYPE_S32))
2236 return;
2237
2238 // look for immediate
2239 for (s = 0; s < 2; ++s)
2240 if (add->src(s).getImmediate(imm))
2241 break;
2242 if (s >= 2)
2243 return;
2244 s = s ? 0 : 1;
2245 // determine if immediate fits
2246 val += imm.reg.data.s32;
2247 if (val > 31 || val < -32)
2248 return;
2249 // determine if other addend fits
2250 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2251 return;
2252
2253 bld.setPosition(insn, false); // make sure bld is init'ed
2254 // replace sources
2255 insn->setSrc(2, bld.mkImm(val));
2256 insn->setSrc(0, add->getSrc(s));
2257 }
2258
2259 // NEG(AND(SET, 1)) -> SET
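// An integer SET writes 0/-1, AND with 1 makes that 0/1, and NEG turns it
// back into 0/-1, so the NEG's result is just the original SET result.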
2260 void
2261 AlgebraicOpt::handleNEG(Instruction *i) {
2262 Instruction *src = i->getSrc(0)->getInsn();
2263 ImmediateValue imm;
2264 int b;
2265
2266 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2267 return;
2268
2269 if (src->src(0).getImmediate(imm))
2270 b = 1;
2271 else if (src->src(1).getImmediate(imm))
2272 b = 0;
2273 else
2274 return;
2275
2276 if (!imm.isInteger(1))
2277 return;
2278
2279 Instruction *set = src->getSrc(b)->getInsn();
2280    if (set && (set->op == OP_SET || set->op == OP_SET_AND ||
2281 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2282 !isFloatType(set->dType)) {
2283 i->def(0).replace(set->getDef(0), false);
2284 }
2285 }
2286
2287 // EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
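// The magic immediates encode (width << 8) | offset of the packed fields:
// 0x1000 = x (16 bits at 0), 0x0a10 = y (10 bits at 16),
// 0x061a = z (6 bits at 26).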
2288 void
2289 AlgebraicOpt::handleEXTBF_RDSV(Instruction *i)
2290 {
2291 Instruction *rdsv = i->getSrc(0)->getUniqueInsn();
2292    if (!rdsv || rdsv->op != OP_RDSV ||
2293 rdsv->getSrc(0)->asSym()->reg.data.sv.sv != SV_COMBINED_TID)
2294 return;
2295 // Avoid creating more RDSV instructions
2296 if (rdsv->getDef(0)->refCount() > 1)
2297 return;
2298
2299 ImmediateValue imm;
2300 if (!i->src(1).getImmediate(imm))
2301 return;
2302
2303 int index;
2304 if (imm.isInteger(0x1000))
2305 index = 0;
2306 else
2307 if (imm.isInteger(0x0a10))
2308 index = 1;
2309 else
2310 if (imm.isInteger(0x061a))
2311 index = 2;
2312 else
2313 return;
2314
2315 bld.setPosition(i, false);
2316
2317 i->op = OP_RDSV;
2318 i->setSrc(0, bld.mkSysVal(SV_TID, index));
2319 i->setSrc(1, NULL);
2320 }
2321
2322 bool
2323 AlgebraicOpt::visit(BasicBlock *bb)
2324 {
2325 Instruction *next;
2326 for (Instruction *i = bb->getEntry(); i; i = next) {
2327 next = i->next;
2328 switch (i->op) {
2329 case OP_ABS:
2330 handleABS(i);
2331 break;
2332 case OP_ADD:
2333 handleADD(i);
2334 break;
2335 case OP_RCP:
2336 handleRCP(i);
2337 break;
2338 case OP_MIN:
2339 case OP_MAX:
2340 handleMINMAX(i);
2341 break;
2342 case OP_SLCT:
2343 handleSLCT(i);
2344 break;
2345 case OP_AND:
2346 case OP_OR:
2347 case OP_XOR:
2348 handleLOGOP(i);
2349 break;
2350 case OP_CVT:
2351 handleCVT_NEG(i);
2352 handleCVT_CVT(i);
2353 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2354 handleCVT_EXTBF(i);
2355 break;
2356 case OP_SUCLAMP:
2357 handleSUCLAMP(i);
2358 break;
2359 case OP_NEG:
2360 handleNEG(i);
2361 break;
2362 case OP_EXTBF:
2363 handleEXTBF_RDSV(i);
2364 break;
2365 default:
2366 break;
2367 }
2368 }
2369
2370 return true;
2371 }
2372
2373 // =============================================================================
2374
2375 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2376 // MUL(a, b) -> a few XMADs
2377 // MAD/FMA(a, b, c) -> a few XMADs
2378 class LateAlgebraicOpt : public Pass
2379 {
2380 private:
2381 virtual bool visit(Instruction *);
2382
2383 void handleADD(Instruction *);
2384 void handleMULMAD(Instruction *);
2385 bool tryADDToSHLADD(Instruction *);
2386
2387 BuildUtil bld;
2388 };
2389
2390 void
2391 LateAlgebraicOpt::handleADD(Instruction *add)
2392 {
2393 Value *src0 = add->getSrc(0);
2394 Value *src1 = add->getSrc(1);
2395
2396 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2397 return;
2398
2399 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2400 tryADDToSHLADD(add);
2401 }
2402
2403 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2404 bool
2405 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2406 {
2407 Value *src0 = add->getSrc(0);
2408 Value *src1 = add->getSrc(1);
2409 ImmediateValue imm;
2410 Instruction *shl;
2411 Value *src;
2412 int s;
2413
2414 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2415 || isFloatType(add->dType))
2416 return false;
2417
2418 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2419 s = 0;
2420 else
2421 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2422 s = 1;
2423 else
2424 return false;
2425
2426 src = add->getSrc(s);
2427 shl = src->getUniqueInsn();
2428
2429 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2430 return false;
2431
2432 if (!shl->src(1).getImmediate(imm))
2433 return false;
2434
2435 add->op = OP_SHLADD;
2436 add->setSrc(2, add->src(!s));
2437 // SHL can't have any modifiers, but the ADD source may have had
2438 // one. Preserve it.
2439 add->setSrc(0, shl->getSrc(0));
2440 if (s == 1)
2441 add->src(0).mod = add->src(1).mod;
2442 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2443 add->src(1).mod = Modifier(0);
2444
2445 return true;
2446 }
2447
2448 // MUL(a, b) -> a few XMADs
2449 // MAD/FMA(a, b, c) -> a few XMADs
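// XMAD forms a 16x16-bit product plus a 32-bit addend, so a 32-bit multiply
// is rebuilt from three of them: the low halves (plus c), the a.hi cross
// term (merged into the high half, MRG), and a final one that shifts the
// b.hi cross term left by 16 (PSL) with carry propagation (CBCC). Rough
// sketch only; the exact half-word selection is in the subOp flags below.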
2450 void
2451 LateAlgebraicOpt::handleMULMAD(Instruction *i)
2452 {
2453 // TODO: handle NV50_IR_SUBOP_MUL_HIGH
2454 if (!prog->getTarget()->isOpSupported(OP_XMAD, TYPE_U32))
2455 return;
2456 if (isFloatType(i->dType) || typeSizeof(i->dType) != 4)
2457 return;
2458 if (i->subOp || i->usesFlags() || i->flagsDef >= 0)
2459 return;
2460
2461 assert(!i->src(0).mod);
2462 assert(!i->src(1).mod);
2463 assert(i->op == OP_MUL ? 1 : !i->src(2).mod);
2464
2465 bld.setPosition(i, false);
2466
2467 Value *a = i->getSrc(0);
2468 Value *b = i->getSrc(1);
2469 Value *c = i->op == OP_MUL ? bld.mkImm(0) : i->getSrc(2);
2470
2471 Value *tmp0 = bld.getSSA();
2472 Value *tmp1 = bld.getSSA();
2473
2474 Instruction *insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp0, b, a, c);
2475 insn->setPredicate(i->cc, i->getPredicate());
2476
2477 insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp1, b, a, bld.mkImm(0));
2478 insn->setPredicate(i->cc, i->getPredicate());
2479 insn->subOp = NV50_IR_SUBOP_XMAD_MRG | NV50_IR_SUBOP_XMAD_H1(1);
2480
2481 Value *pred = i->getPredicate();
2482 i->setPredicate(i->cc, NULL);
2483
2484 i->op = OP_XMAD;
2485 i->setSrc(0, b);
2486 i->setSrc(1, tmp1);
2487 i->setSrc(2, tmp0);
2488 i->subOp = NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_CBCC;
2489 i->subOp |= NV50_IR_SUBOP_XMAD_H1(0) | NV50_IR_SUBOP_XMAD_H1(1);
2490
2491 i->setPredicate(i->cc, pred);
2492 }
2493
2494 bool
2495 LateAlgebraicOpt::visit(Instruction *i)
2496 {
2497 switch (i->op) {
2498 case OP_ADD:
2499 handleADD(i);
2500 break;
2501 case OP_MUL:
2502 case OP_MAD:
2503 case OP_FMA:
2504 handleMULMAD(i);
2505 break;
2506 default:
2507 break;
2508 }
2509
2510 return true;
2511 }
2512
2513 // =============================================================================
2514
2515 // Split 64-bit MUL and MAD
2516 class Split64BitOpPreRA : public Pass
2517 {
2518 private:
2519 virtual bool visit(BasicBlock *);
2520 void split64MulMad(Function *, Instruction *, DataType);
2521
2522 BuildUtil bld;
2523 };
2524
2525 bool
2526 Split64BitOpPreRA::visit(BasicBlock *bb)
2527 {
2528 Instruction *i, *next;
2529 Modifier mod;
2530
2531 for (i = bb->getEntry(); i; i = next) {
2532 next = i->next;
2533
2534 DataType hTy;
2535 switch (i->dType) {
2536 case TYPE_U64: hTy = TYPE_U32; break;
2537 case TYPE_S64: hTy = TYPE_S32; break;
2538 default:
2539 continue;
2540 }
2541
2542 if (i->op == OP_MAD || i->op == OP_MUL)
2543 split64MulMad(func, i, hTy);
2544 }
2545
2546 return true;
2547 }
2548
2549 void
2550 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2551 {
2552 assert(i->op == OP_MAD || i->op == OP_MUL);
2553 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2554 assert(typeSizeof(hTy) == 4);
2555
2556 bld.setPosition(i, true);
2557
2558 Value *zero = bld.mkImm(0u);
2559 Value *carry = bld.getSSA(1, FILE_FLAGS);
2560
2561 // We want to compute `d = a * b (+ c)?`, where a, b, c and d are 64-bit
2562 // values (a, b and c might be 32-bit values), using 32-bit operations. This
2563 // gives the following operations:
2564 // * `d.low = low(a.low * b.low) (+ c.low)?`
2565 // * `d.high = low(a.high * b.low) + low(a.low * b.high)
2566 // + high(a.low * b.low) (+ c.high)?`
2567 //
2568    // To compute the high bits, we can split this into the following operations:
2569 // * `tmp1 = low(a.high * b.low) (+ c.high)?`
2570 // * `tmp2 = low(a.low * b.high) + tmp1`
2571 // * `d.high = high(a.low * b.low) + tmp2`
2572 //
2573    // mkSplit puts the lower bits at index 0 and the higher bits at index 1.
2574
2575 Value *op1[2];
2576 if (i->getSrc(0)->reg.size == 8)
2577 bld.mkSplit(op1, 4, i->getSrc(0));
2578 else {
2579 op1[0] = i->getSrc(0);
2580 op1[1] = zero;
2581 }
2582 Value *op2[2];
2583 if (i->getSrc(1)->reg.size == 8)
2584 bld.mkSplit(op2, 4, i->getSrc(1));
2585 else {
2586 op2[0] = i->getSrc(1);
2587 op2[1] = zero;
2588 }
2589
2590 Value *op3[2] = { NULL, NULL };
2591 if (i->op == OP_MAD) {
2592 if (i->getSrc(2)->reg.size == 8)
2593 bld.mkSplit(op3, 4, i->getSrc(2));
2594 else {
2595 op3[0] = i->getSrc(2);
2596 op3[1] = zero;
2597 }
2598 }
2599
2600 Value *tmpRes1Hi = bld.getSSA();
2601 if (i->op == OP_MAD)
2602 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2603 else
2604 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2605
2606 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2607
2608 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2609
2610    // If it was a MAD, add the carry from the low-half MAD.
2611    // A MUL needs no carry: nothing is added in the low half, and
2612    // high(a.low * b.low) already goes into d.high.
2613 if (i->op == OP_MAD)
2614 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2615 else
2616 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2617
2618 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2619 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2620 if (i->op == OP_MAD)
2621 hiPart3->setFlagsSrc(3, carry);
2622
2623 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2624
2625 delete_Instruction(fn->getProgram(), i);
2626 }
2627
2628 // =============================================================================
2629
2630 static inline void
2631 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2632 {
2633 if (offset != ldst->getSrc(0)->reg.data.offset) {
2634 if (ldst->getSrc(0)->refCount() > 1)
2635 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2636 ldst->getSrc(0)->reg.data.offset = offset;
2637 }
2638 }
2639
2640 // Combine loads and stores, forward stores to loads where possible.
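// For example, two adjacent 32-bit loads from c0[0x0] and c0[0x4] can merge
// into one 64-bit load, and a load right after a store to the same offset
// can take the stored GPR values directly instead of touching memory.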
2641 class MemoryOpt : public Pass
2642 {
2643 private:
2644 class Record
2645 {
2646 public:
2647 Record *next;
2648 Instruction *insn;
2649 const Value *rel[2];
2650 const Value *base;
2651 int32_t offset;
2652 int8_t fileIndex;
2653 uint8_t size;
2654 bool locked;
2655 Record *prev;
2656
2657 bool overlaps(const Instruction *ldst) const;
2658
2659 inline void link(Record **);
2660 inline void unlink(Record **);
2661 inline void set(const Instruction *ldst);
2662 };
2663
2664 public:
2665 MemoryOpt();
2666
2667 Record *loads[DATA_FILE_COUNT];
2668 Record *stores[DATA_FILE_COUNT];
2669
2670 MemoryPool recordPool;
2671
2672 private:
2673 virtual bool visit(BasicBlock *);
2674 bool runOpt(BasicBlock *);
2675
2676 Record **getList(const Instruction *);
2677
2678 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2679
2680 // merge @insn into load/store instruction from @rec
2681 bool combineLd(Record *rec, Instruction *ld);
2682 bool combineSt(Record *rec, Instruction *st);
2683
2684 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2685 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2686 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2687
2688 void addRecord(Instruction *ldst);
2689 void purgeRecords(Instruction *const st, DataFile);
2690 void lockStores(Instruction *const ld);
2691 void reset();
2692
2693 private:
2694 Record *prevRecord;
2695 };
2696
2697 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2698 {
2699 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2700 loads[i] = NULL;
2701 stores[i] = NULL;
2702 }
2703 prevRecord = NULL;
2704 }
2705
2706 void
2707 MemoryOpt::reset()
2708 {
2709 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2710 Record *it, *next;
2711 for (it = loads[i]; it; it = next) {
2712 next = it->next;
2713 recordPool.release(it);
2714 }
2715 loads[i] = NULL;
2716 for (it = stores[i]; it; it = next) {
2717 next = it->next;
2718 recordPool.release(it);
2719 }
2720 stores[i] = NULL;
2721 }
2722 }
2723
2724 bool
2725 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2726 {
2727 int32_t offRc = rec->offset;
2728 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2729 int sizeRc = rec->size;
2730 int sizeLd = typeSizeof(ld->dType);
2731 int size = sizeRc + sizeLd;
2732 int d, j;
2733
2734 if (!prog->getTarget()->
2735 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2736 return false;
2737 // no unaligned loads
2738 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2739 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2740 return false;
2741 // for compute indirect loads are not guaranteed to be aligned
2742 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2743 return false;
2744
2745 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2746
2747 // lock any stores that overlap with the load being merged into the
2748 // existing record.
2749 lockStores(ld);
2750
2751 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2752
2753 if (offLd < offRc) {
2754 int sz;
2755 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2756 // d: nr of definitions in ld
2757 // j: nr of definitions in rec->insn, move:
2758 for (d = d + j - 1; j > 0; --j, --d)
2759 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2760
2761 if (rec->insn->getSrc(0)->refCount() > 1)
2762 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2763 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2764
2765 d = 0;
2766 } else {
2767 d = j;
2768 }
2769 // move definitions of @ld to @rec->insn
2770 for (j = 0; sizeLd; ++j, ++d) {
2771 sizeLd -= ld->getDef(j)->reg.size;
2772 rec->insn->setDef(d, ld->getDef(j));
2773 }
2774
2775 rec->size = size;
2776 rec->insn->getSrc(0)->reg.size = size;
2777 rec->insn->setType(typeOfSize(size));
2778
2779 delete_Instruction(prog, ld);
2780
2781 return true;
2782 }
2783
2784 bool
2785 MemoryOpt::combineSt(Record *rec, Instruction *st)
2786 {
2787 int32_t offRc = rec->offset;
2788 int32_t offSt = st->getSrc(0)->reg.data.offset;
2789 int sizeRc = rec->size;
2790 int sizeSt = typeSizeof(st->dType);
2791 int s = sizeSt / 4;
2792 int size = sizeRc + sizeSt;
2793 int j, k;
2794 Value *src[4]; // no modifiers in ValueRef allowed for st
2795 Value *extra[3];
2796
2797 if (!prog->getTarget()->
2798 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2799 return false;
2800 // no unaligned stores
2801 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2802 return false;
2803 // for compute indirect stores are not guaranteed to be aligned
2804 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2805 return false;
2806
2807 // remove any existing load/store records for the store being merged into
2808 // the existing record.
2809 purgeRecords(st, DATA_FILE_COUNT);
2810
2811 st->takeExtraSources(0, extra); // save predicate and indirect address
2812
2813 if (offRc < offSt) {
2814 // save values from @st
2815 for (s = 0; sizeSt; ++s) {
2816 sizeSt -= st->getSrc(s + 1)->reg.size;
2817 src[s] = st->getSrc(s + 1);
2818 }
2819 // set record's values as low sources of @st
2820 for (j = 1; sizeRc; ++j) {
2821 sizeRc -= rec->insn->getSrc(j)->reg.size;
2822 st->setSrc(j, rec->insn->getSrc(j));
2823 }
2824 // set saved values as high sources of @st
2825 for (k = j, j = 0; j < s; ++j)
2826 st->setSrc(k++, src[j]);
2827
2828 updateLdStOffset(st, offRc, func);
2829 } else {
2830 for (j = 1; sizeSt; ++j)
2831 sizeSt -= st->getSrc(j)->reg.size;
2832 for (s = 1; sizeRc; ++j, ++s) {
2833 sizeRc -= rec->insn->getSrc(s)->reg.size;
2834 st->setSrc(j, rec->insn->getSrc(s));
2835 }
2836 rec->offset = offSt;
2837 }
2838 st->putExtraSources(0, extra); // restore pointer and predicate
2839
2840 delete_Instruction(prog, rec->insn);
2841 rec->insn = st;
2842 rec->size = size;
2843 rec->insn->getSrc(0)->reg.size = size;
2844 rec->insn->setType(typeOfSize(size));
2845 return true;
2846 }
2847
2848 void
2849 MemoryOpt::Record::set(const Instruction *ldst)
2850 {
2851 const Symbol *mem = ldst->getSrc(0)->asSym();
2852 fileIndex = mem->reg.fileIndex;
2853 rel[0] = ldst->getIndirect(0, 0);
2854 rel[1] = ldst->getIndirect(0, 1);
2855 offset = mem->reg.data.offset;
2856 base = mem->getBase();
2857 size = typeSizeof(ldst->sType);
2858 }
2859
2860 void
2861 MemoryOpt::Record::link(Record **list)
2862 {
2863 next = *list;
2864 if (next)
2865 next->prev = this;
2866 prev = NULL;
2867 *list = this;
2868 }
2869
2870 void
2871 MemoryOpt::Record::unlink(Record **list)
2872 {
2873 if (next)
2874 next->prev = prev;
2875 if (prev)
2876 prev->next = next;
2877 else
2878 *list = next;
2879 }
2880
2881 MemoryOpt::Record **
2882 MemoryOpt::getList(const Instruction *insn)
2883 {
2884 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2885 return &loads[insn->src(0).getFile()];
2886 return &stores[insn->src(0).getFile()];
2887 }
2888
2889 void
2890 MemoryOpt::addRecord(Instruction *i)
2891 {
2892 Record **list = getList(i);
2893 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2894
2895 it->link(list);
2896 it->set(i);
2897 it->insn = i;
2898 it->locked = false;
2899 }
2900
2901 MemoryOpt::Record *
2902 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2903 {
2904 const Symbol *sym = insn->getSrc(0)->asSym();
2905 const int size = typeSizeof(insn->sType);
2906 Record *rec = NULL;
2907 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2908
2909 for (; it; it = it->next) {
2910 if (it->locked && insn->op != OP_LOAD && insn->op != OP_VFETCH)
2911 continue;
2912 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2913 it->rel[0] != insn->getIndirect(0, 0) ||
2914 it->fileIndex != sym->reg.fileIndex ||
2915 it->rel[1] != insn->getIndirect(0, 1))
2916 continue;
2917
2918 if (it->offset < sym->reg.data.offset) {
2919 if (it->offset + it->size >= sym->reg.data.offset) {
2920 isAdj = (it->offset + it->size == sym->reg.data.offset);
2921 if (!isAdj)
2922 return it;
2923 if (!(it->offset & 0x7))
2924 rec = it;
2925 }
2926 } else {
2927 isAdj = it->offset != sym->reg.data.offset;
2928 if (size <= it->size && !isAdj)
2929 return it;
2930 else
2931 if (!(sym->reg.data.offset & 0x7))
2932 if (it->offset - size <= sym->reg.data.offset)
2933 rec = it;
2934 }
2935 }
2936 return rec;
2937 }
2938
2939 bool
2940 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2941 {
2942 Instruction *st = rec->insn;
2943 int32_t offSt = rec->offset;
2944 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2945 int d, s;
2946
2947 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2948 offSt += st->getSrc(s)->reg.size;
2949 if (offSt != offLd)
2950 return false;
2951
2952 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2953 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
2954 return false;
2955 if (st->getSrc(s)->reg.file != FILE_GPR)
2956 return false;
2957 ld->def(d).replace(st->src(s), false);
2958 }
2959 ld->bb->remove(ld);
2960 return true;
2961 }
2962
2963 bool
2964 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
2965 {
2966 Instruction *ldR = rec->insn;
2967 int32_t offR = rec->offset;
2968 int32_t offE = ldE->getSrc(0)->reg.data.offset;
2969 int dR, dE;
2970
2971 assert(offR <= offE);
2972 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
2973 offR += ldR->getDef(dR)->reg.size;
2974 if (offR != offE)
2975 return false;
2976
2977 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
2978 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
2979 return false;
2980 ldE->def(dE).replace(ldR->getDef(dR), false);
2981 }
2982
2983 delete_Instruction(prog, ldE);
2984 return true;
2985 }
2986
2987 bool
2988 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
2989 {
2990 const Instruction *const ri = rec->insn;
2991 Value *extra[3];
2992
2993 int32_t offS = st->getSrc(0)->reg.data.offset;
2994 int32_t offR = rec->offset;
2995 int32_t endS = offS + typeSizeof(st->dType);
2996 int32_t endR = offR + typeSizeof(ri->dType);
2997
2998 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
2999
3000 st->takeExtraSources(0, extra);
3001
3002 if (offR < offS) {
3003 Value *vals[10];
3004 int s, n;
3005 int k = 0;
3006 // get non-replaced sources of ri
3007 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
3008 vals[k++] = ri->getSrc(s);
3009 n = s;
3010 // get replaced sources of st
3011 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
3012 vals[k++] = st->getSrc(s);
3013 // skip replaced sources of ri
3014 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
3015 // get non-replaced sources after values covered by st
3016 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
3017 vals[k++] = ri->getSrc(s);
3018 assert((unsigned int)k <= ARRAY_SIZE(vals));
3019 for (s = 0; s < k; ++s)
3020 st->setSrc(s + 1, vals[s]);
3021 st->setSrc(0, ri->getSrc(0));
3022 } else
3023 if (endR > endS) {
3024 int j, s;
3025 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
3026 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
3027 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
3028 st->setSrc(s++, ri->getSrc(j));
3029 }
3030 st->putExtraSources(0, extra);
3031
3032 delete_Instruction(prog, rec->insn);
3033
3034 rec->insn = st;
3035 rec->offset = st->getSrc(0)->reg.data.offset;
3036
3037 st->setType(typeOfSize(rec->size));
3038
3039 return true;
3040 }
3041
3042 bool
3043 MemoryOpt::Record::overlaps(const Instruction *ldst) const
3044 {
3045 Record that;
3046 that.set(ldst);
3047
3048 // This assumes that images/buffers can't overlap. They can.
3049 // TODO: Plumb the restrict logic through, and only skip when it's a
3050 // restrict situation, or there can implicitly be no writes.
3051 if (this->fileIndex != that.fileIndex && this->rel[1] == that.rel[1])
3052 return false;
3053
3054 if (this->rel[0] || that.rel[0])
3055 return this->base == that.base;
3056
3057 return
3058 (this->offset < that.offset + that.size) &&
3059 (this->offset + this->size > that.offset);
3060 }
3061
3062 // Stores whose result @ld has read must not be eliminated when we later
3063 // find stores to the same location, and they may no longer be merged
3064 // with those later stores.
3065 // The stored value can, however, still be used to determine the value
3066 // returned by future loads.
3067 void
3068 MemoryOpt::lockStores(Instruction *const ld)
3069 {
3070 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
3071 if (!r->locked && r->overlaps(ld))
3072 r->locked = true;
3073 }
3074
3075 // Prior loads from the location of @st are no longer valid.
3076 // Stores to the location of @st may no longer be used to derive
3077 // the value at it nor be coalesced into later stores.
3078 void
3079 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
3080 {
3081 if (st)
3082 f = st->src(0).getFile();
3083
3084 for (Record *r = loads[f]; r; r = r->next)
3085 if (!st || r->overlaps(st))
3086 r->unlink(&loads[f]);
3087
3088 for (Record *r = stores[f]; r; r = r->next)
3089 if (!st || r->overlaps(st))
3090 r->unlink(&stores[f]);
3091 }
3092
3093 bool
3094 MemoryOpt::visit(BasicBlock *bb)
3095 {
3096 bool ret = runOpt(bb);
3097    // Run again; one pass won't combine four 32-bit ld/st into a single
3098    // 128-bit ld/st where 96-bit memory operations are forbidden.
3099 if (ret)
3100 ret = runOpt(bb);
3101 return ret;
3102 }
3103
3104 bool
3105 MemoryOpt::runOpt(BasicBlock *bb)
3106 {
3107 Instruction *ldst, *next;
3108 Record *rec;
3109 bool isAdjacent = true;
3110
3111 for (ldst = bb->getEntry(); ldst; ldst = next) {
3112 bool keep = true;
3113 bool isLoad = true;
3114 next = ldst->next;
3115
3116 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
3117 if (ldst->isDead()) {
3118 // might have been produced by earlier optimization
3119 delete_Instruction(prog, ldst);
3120 continue;
3121 }
3122 } else
3123 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
3124 if (typeSizeof(ldst->dType) == 4 &&
3125 ldst->src(1).getFile() == FILE_GPR &&
3126 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
3127 delete_Instruction(prog, ldst);
3128 continue;
3129 }
3130 isLoad = false;
3131 } else {
3132 // TODO: maybe have all fixed ops act as barrier ?
3133 if (ldst->op == OP_CALL ||
3134 ldst->op == OP_BAR ||
3135 ldst->op == OP_MEMBAR) {
3136 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3137 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3138 purgeRecords(NULL, FILE_MEMORY_SHARED);
3139 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3140 } else
3141 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
3142 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
3143 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3144 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3145 purgeRecords(NULL, FILE_MEMORY_SHARED);
3146 } else {
3147 purgeRecords(NULL, ldst->src(0).getFile());
3148 }
3149 } else
3150 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
3151 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3152 }
3153 continue;
3154 }
3155 if (ldst->getPredicate()) // TODO: handle predicated ld/st
3156 continue;
3157 if (ldst->perPatch) // TODO: create separate per-patch lists
3158 continue;
3159
3160 if (isLoad) {
3161 DataFile file = ldst->src(0).getFile();
3162
3163 // if ld l[]/g[] look for previous store to eliminate the reload
3164 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
3165 // TODO: shared memory ?
3166 rec = findRecord(ldst, false, isAdjacent);
3167 if (rec && !isAdjacent)
3168 keep = !replaceLdFromSt(ldst, rec);
3169 }
3170
3171 // or look for ld from the same location and replace this one
3172 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
3173 if (rec) {
3174 if (!isAdjacent)
3175 keep = !replaceLdFromLd(ldst, rec);
3176 else
3177 // or combine a previous load with this one
3178 keep = !combineLd(rec, ldst);
3179 }
3180 if (keep)
3181 lockStores(ldst);
3182 } else {
3183 rec = findRecord(ldst, false, isAdjacent);
3184 if (rec) {
3185 if (!isAdjacent)
3186 keep = !replaceStFromSt(ldst, rec);
3187 else
3188 keep = !combineSt(rec, ldst);
3189 }
3190 if (keep)
3191 purgeRecords(ldst, DATA_FILE_COUNT);
3192 }
3193 if (keep)
3194 addRecord(ldst);
3195 }
3196 reset();
3197
3198 return true;
3199 }
3200
3201 // =============================================================================
3202
3203 // Turn control flow into predicated instructions (after register allocation !).
3204 // TODO:
3205 // Could move this to before register allocation on NVC0 and also handle nested
3206 // constructs.
3207 class FlatteningPass : public Pass
3208 {
3209 private:
3210 virtual bool visit(Function *);
3211 virtual bool visit(BasicBlock *);
3212
3213 bool tryPredicateConditional(BasicBlock *);
3214 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
3215 void tryPropagateBranch(BasicBlock *);
3216 inline bool isConstantCondition(Value *pred);
3217 inline bool mayPredicate(const Instruction *, const Value *pred) const;
3218 inline void removeFlow(Instruction *);
3219
3220 uint8_t gpr_unit;
3221 };
3222
3223 bool
3224 FlatteningPass::isConstantCondition(Value *pred)
3225 {
3226 Instruction *insn = pred->getUniqueInsn();
3227 assert(insn);
3228 if (insn->op != OP_SET || insn->srcExists(2))
3229 return false;
3230
3231 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
3232 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
3233 DataFile file;
3234 if (ld) {
3235 if (ld->op != OP_MOV && ld->op != OP_LOAD)
3236 return false;
3237 if (ld->src(0).isIndirect(0))
3238 return false;
3239 file = ld->src(0).getFile();
3240 } else {
3241 file = insn->src(s).getFile();
3242 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
3243 // in register "units", which can vary between targets.
3244 if (file == FILE_GPR) {
3245 Value *v = insn->getSrc(s);
3246 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
3247 int units = bytes >> gpr_unit;
3248 if (units > prog->maxGPR)
3249 file = FILE_IMMEDIATE;
3250 }
3251 }
3252 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
3253 return false;
3254 }
3255 return true;
3256 }
3257
3258 void
3259 FlatteningPass::removeFlow(Instruction *insn)
3260 {
3261 FlowInstruction *term = insn ? insn->asFlow() : NULL;
3262 if (!term)
3263 return;
3264 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
3265
3266 if (term->op == OP_BRA) {
3267 // TODO: this might get more difficult when we get arbitrary BRAs
3268 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
3269 return;
3270 } else
3271 if (term->op != OP_JOIN)
3272 return;
3273
3274 Value *pred = term->getPredicate();
3275
3276 delete_Instruction(prog, term);
3277
3278 if (pred && pred->refCount() == 0) {
3279 Instruction *pSet = pred->getUniqueInsn();
3280 pred->join->reg.data.id = -1; // deallocate
3281 if (pSet->isDead())
3282 delete_Instruction(prog, pSet);
3283 }
3284 }
3285
3286 void
3287 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3288 {
3289 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3290 if (i->isNop())
3291 continue;
3292 assert(!i->getPredicate());
3293 i->setPredicate(cc, pred);
3294 }
3295 removeFlow(bb->getExit());
3296 }
3297
3298 bool
3299 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3300 {
3301 if (insn->isPseudo())
3302 return true;
3303 // TODO: calls where we don't know which registers are modified
3304
3305 if (!prog->getTarget()->mayPredicate(insn, pred))
3306 return false;
3307 for (int d = 0; insn->defExists(d); ++d)
3308 if (insn->getDef(d)->equals(pred))
3309 return false;
3310 return true;
3311 }
3312
3313 // If we jump to BRA/RET/EXIT, replace the jump with it.
3314 // NOTE: We do not update the CFG anymore here !
3315 //
3316 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3317 // BB:0
3318 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3319 // BB1:
3320 // bra BB:3
3321 // BB2:
3322 // ...
3323 // BB3:
3324 // ...
3325 void
3326 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3327 {
3328 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3329 BasicBlock *bf = i->asFlow()->target.bb;
3330
3331 if (bf->getInsnCount() != 1)
3332 continue;
3333
3334 FlowInstruction *bra = i->asFlow();
3335 FlowInstruction *rep = bf->getExit()->asFlow();
3336
3337 if (!rep || rep->getPredicate())
3338 continue;
3339 if (rep->op != OP_BRA &&
3340 rep->op != OP_JOIN &&
3341 rep->op != OP_EXIT)
3342 continue;
3343
3344 // TODO: If there are multiple branches to @rep, only the first would
3345 // be replaced, so only remove them after this pass is done ?
3346 // Also, need to check all incident blocks for fall-through exits and
3347 // add the branch there.
3348 bra->op = rep->op;
3349 bra->target.bb = rep->target.bb;
3350 if (bf->cfg.incidentCount() == 1)
3351 bf->remove(rep);
3352 }
3353 }
3354
3355 bool
3356 FlatteningPass::visit(Function *fn)
3357 {
3358 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3359
3360 return true;
3361 }
3362
3363 bool
3364 FlatteningPass::visit(BasicBlock *bb)
3365 {
3366 if (tryPredicateConditional(bb))
3367 return true;
3368
3369 // try to attach join to previous instruction
3370 if (prog->getTarget()->hasJoin) {
3371 Instruction *insn = bb->getExit();
3372 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3373 insn = insn->prev;
3374 if (insn && !insn->getPredicate() &&
3375 !insn->asFlow() &&
3376 insn->op != OP_DISCARD &&
3377 insn->op != OP_TEXBAR &&
3378 !isTextureOp(insn->op) && // probably just nve4
3379 !isSurfaceOp(insn->op) && // not confirmed
3380 insn->op != OP_LINTERP && // probably just nve4
3381 insn->op != OP_PINTERP && // probably just nve4
3382 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3383 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3384 !insn->isNop()) {
3385 insn->join = 1;
3386 bb->remove(bb->getExit());
3387 return true;
3388 }
3389 }
3390 }
3391
3392 tryPropagateBranch(bb);
3393
3394 return true;
3395 }
3396
3397 bool
3398 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3399 {
3400 BasicBlock *bL = NULL, *bR = NULL;
3401 unsigned int nL = 0, nR = 0, limit = 12;
3402 Instruction *insn;
3403 unsigned int mask;
3404
3405 mask = bb->initiatesSimpleConditional();
3406 if (!mask)
3407 return false;
3408
3409 assert(bb->getExit());
3410 Value *pred = bb->getExit()->getPredicate();
3411 assert(pred);
3412
3413 if (isConstantCondition(pred))
3414 limit = 4;
3415
3416 Graph::EdgeIterator ei = bb->cfg.outgoing();
3417
3418 if (mask & 1) {
3419 bL = BasicBlock::get(ei.getNode());
3420 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3421 if (!mayPredicate(insn, pred))
3422 return false;
3423 if (nL > limit)
3424 return false; // too long, do a real branch
3425 }
3426 ei.next();
3427
3428 if (mask & 2) {
3429 bR = BasicBlock::get(ei.getNode());
3430 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3431 if (!mayPredicate(insn, pred))
3432 return false;
3433 if (nR > limit)
3434 return false; // too long, do a real branch
3435 }
3436
3437 if (bL)
3438 predicateInstructions(bL, pred, bb->getExit()->cc);
3439 if (bR)
3440 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3441
3442 if (bb->joinAt) {
3443 bb->remove(bb->joinAt);
3444 bb->joinAt = NULL;
3445 }
3446 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3447
3448 // remove potential join operations at the end of the conditional
3449 if (prog->getTarget()->joinAnterior) {
3450 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3451 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3452 removeFlow(bb->getEntry());
3453 }
3454
3455 return true;
3456 }
3457
3458 // =============================================================================
3459
3460 // Fold Immediate into MAD; must be done after register allocation due to
3461 // constraint SDST == SSRC2
3462 // TODO:
3463 // Does NVC0+ have other situations where this pass makes sense?
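// On nv50 the immediate MAD form reuses the destination register as the
// third source; the register id comparison below enforces SDST == SSRC2.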
3464 class PostRaLoadPropagation : public Pass
3465 {
3466 private:
3467 virtual bool visit(Instruction *);
3468
3469 void handleMADforNV50(Instruction *);
3470 void handleMADforNVC0(Instruction *);
3471 };
3472
3473 static bool
3474 post_ra_dead(Instruction *i)
3475 {
3476 for (int d = 0; i->defExists(d); ++d)
3477 if (i->getDef(d)->refCount())
3478 return false;
3479 return true;
3480 }
3481
3482 // Fold Immediate into MAD; must be done after register allocation due to
3483 // constraint SDST == SSRC2
3484 void
3485 PostRaLoadPropagation::handleMADforNV50(Instruction *i)
3486 {
3487 if (i->def(0).getFile() != FILE_GPR ||
3488 i->src(0).getFile() != FILE_GPR ||
3489 i->src(1).getFile() != FILE_GPR ||
3490 i->src(2).getFile() != FILE_GPR ||
3491 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3492 return;
3493
3494 if (i->getDef(0)->reg.data.id >= 64 ||
3495 i->getSrc(0)->reg.data.id >= 64)
3496 return;
3497
3498 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3499 return;
3500
3501 if (i->getPredicate())
3502 return;
3503
3504 Value *vtmp;
3505 Instruction *def = i->getSrc(1)->getInsn();
3506
3507 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3508 def = def->getSrc(0)->getInsn();
3509 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3510 vtmp = i->getSrc(1);
3511 if (isFloatType(i->sType)) {
3512 i->setSrc(1, def->getSrc(0));
3513 } else {
3514 ImmediateValue val;
3515 // getImmediate() has side-effects on the argument so this *shouldn't*
3516 // be folded into the assert()
3517 MAYBE_UNUSED bool ret = def->src(0).getImmediate(val);
3518 assert(ret);
3519 if (i->getSrc(1)->reg.data.id & 1)
3520 val.reg.data.u32 >>= 16;
3521 val.reg.data.u32 &= 0xffff;
3522 i->setSrc(1, new_ImmediateValue(prog, val.reg.data.u32));
3523 }
3524
3525 /* There's no post-RA dead code elimination, so do it here
3526 * XXX: if we add more code-removing post-RA passes, we might
3527 * want to create a post-RA dead-code elim pass */
3528 if (post_ra_dead(vtmp->getInsn())) {
3529 Value *src = vtmp->getInsn()->getSrc(0);
3530 // Careful -- splits will have already been removed from the
3531 // functions. Don't double-delete.
3532 if (vtmp->getInsn()->bb)
3533 delete_Instruction(prog, vtmp->getInsn());
3534 if (src->getInsn() && post_ra_dead(src->getInsn()))
3535 delete_Instruction(prog, src->getInsn());
3536 }
3537 }
3538 }
3539
3540 void
3541 PostRaLoadPropagation::handleMADforNVC0(Instruction *i)
3542 {
3543 if (i->def(0).getFile() != FILE_GPR ||
3544 i->src(0).getFile() != FILE_GPR ||
3545 i->src(1).getFile() != FILE_GPR ||
3546 i->src(2).getFile() != FILE_GPR ||
3547 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3548 return;
3549
3550 // TODO: gm107 can also do this for S32, maybe other chipsets as well
3551 if (i->dType != TYPE_F32)
3552 return;
3553
3554 if ((i->src(2).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3555 return;
3556
3557 ImmediateValue val;
3558 int s;
3559
3560 if (i->src(0).getImmediate(val))
3561 s = 1;
3562 else if (i->src(1).getImmediate(val))
3563 s = 0;
3564 else
3565 return;
3566
3567 if ((i->src(s).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3568 return;
3569
3570 if (s == 1)
3571 i->swapSources(0, 1);
3572
3573 Instruction *imm = i->getSrc(1)->getInsn();
3574 i->setSrc(1, imm->getSrc(0));
3575 if (post_ra_dead(imm))
3576 delete_Instruction(prog, imm);
3577 }
3578
3579 bool
3580 PostRaLoadPropagation::visit(Instruction *i)
3581 {
3582 switch (i->op) {
3583 case OP_FMA:
3584 case OP_MAD:
3585 if (prog->getTarget()->getChipset() < 0xc0)
3586 handleMADforNV50(i);
3587 else
3588 handleMADforNVC0(i);
3589 break;
3590 default:
3591 break;
3592 }
3593
3594 return true;
3595 }
3596
3597 // =============================================================================
3598
3599 // Common subexpression elimination. Stupid O(n^2) implementation.
3600 class LocalCSE : public Pass
3601 {
3602 private:
3603 virtual bool visit(BasicBlock *);
3604
3605 inline bool tryReplace(Instruction **, Instruction *);
3606
3607 DLList ops[OP_LAST + 1];
3608 };
3609
3610 class GlobalCSE : public Pass
3611 {
3612 private:
3613 virtual bool visit(BasicBlock *);
3614 };
3615
3616 bool
3617 Instruction::isActionEqual(const Instruction *that) const
3618 {
3619 if (this->op != that->op ||
3620 this->dType != that->dType ||
3621 this->sType != that->sType)
3622 return false;
3623 if (this->cc != that->cc)
3624 return false;
3625
3626 if (this->asTex()) {
3627 if (memcmp(&this->asTex()->tex,
3628 &that->asTex()->tex,
3629 sizeof(this->asTex()->tex)))
3630 return false;
3631 } else
3632 if (this->asCmp()) {
3633 if (this->asCmp()->setCond != that->asCmp()->setCond)
3634 return false;
3635 } else
3636 if (this->asFlow()) {
3637 return false;
3638 } else
3639 if (this->op == OP_PHI && this->bb != that->bb) {
3640 /* TODO: we could probably be a bit smarter here by following the
3641 * control flow, but honestly, it is quite painful to check */
3642 return false;
3643 } else {
3644 if (this->ipa != that->ipa ||
3645 this->lanes != that->lanes ||
3646 this->perPatch != that->perPatch)
3647 return false;
3648 if (this->postFactor != that->postFactor)
3649 return false;
3650 }
3651
3652 if (this->subOp != that->subOp ||
3653 this->saturate != that->saturate ||
3654 this->rnd != that->rnd ||
3655 this->ftz != that->ftz ||
3656 this->dnz != that->dnz ||
3657 this->cache != that->cache ||
3658 this->mask != that->mask)
3659 return false;
3660
3661 return true;
3662 }
3663
3664 bool
3665 Instruction::isResultEqual(const Instruction *that) const
3666 {
3667 unsigned int d, s;
3668
3669 // NOTE: location of discard only affects tex with liveOnly and quadops
3670 if (!this->defExists(0) && this->op != OP_DISCARD)
3671 return false;
3672
3673 if (!isActionEqual(that))
3674 return false;
3675
3676 if (this->predSrc != that->predSrc)
3677 return false;
3678
3679 for (d = 0; this->defExists(d); ++d) {
3680 if (!that->defExists(d) ||
3681 !this->getDef(d)->equals(that->getDef(d), false))
3682 return false;
3683 }
3684 if (that->defExists(d))
3685 return false;
3686
3687 for (s = 0; this->srcExists(s); ++s) {
3688 if (!that->srcExists(s))
3689 return false;
3690 if (this->src(s).mod != that->src(s).mod)
3691 return false;
3692 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3693 return false;
3694 }
3695 if (that->srcExists(s))
3696 return false;
3697
3698 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3699 switch (src(0).getFile()) {
3700 case FILE_MEMORY_CONST:
3701 case FILE_SHADER_INPUT:
3702 return true;
3703 case FILE_SHADER_OUTPUT:
3704 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3705 default:
3706 return false;
3707 }
3708 }
3709
3710 return true;
3711 }
3712
3713 // pull through common expressions from different in-blocks
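// If every phi source has a single use and is computed by an equivalent
// instruction in its in-block, hoist one copy into this block and drop the
// phi, e.g. (add x y) in both in-blocks feeding a phi becomes a single add
// here.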
3714 bool
3715 GlobalCSE::visit(BasicBlock *bb)
3716 {
3717 Instruction *phi, *next, *ik;
3718 int s;
3719
3720 // TODO: maybe do this with OP_UNION, too
3721
3722 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3723 next = phi->next;
3724 if (phi->getSrc(0)->refCount() > 1)
3725 continue;
3726 ik = phi->getSrc(0)->getInsn();
3727 if (!ik)
3728 continue; // probably a function input
3729 if (ik->defCount(0xff) > 1)
3730 continue; // too painful to check if we can really push this forward
3731 for (s = 1; phi->srcExists(s); ++s) {
3732 if (phi->getSrc(s)->refCount() > 1)
3733 break;
3734 if (!phi->getSrc(s)->getInsn() ||
3735 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3736 break;
3737 }
3738 if (!phi->srcExists(s)) {
3739 assert(ik->op != OP_PHI);
3740 Instruction *entry = bb->getEntry();
3741 ik->bb->remove(ik);
3742 if (!entry || entry->op != OP_JOIN)
3743 bb->insertHead(ik);
3744 else
3745 bb->insertAfter(entry, ik);
3746 ik->setDef(0, phi->getDef(0));
3747 delete_Instruction(prog, phi);
3748 }
3749 }
3750
3751 return true;
3752 }
3753
3754 bool
3755 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3756 {
3757 Instruction *old = *ptr;
3758
3759 // TODO: maybe relax this later (causes trouble with OP_UNION)
3760 if (i->isPredicated())
3761 return false;
3762
3763 if (!old->isResultEqual(i))
3764 return false;
3765
3766 for (int d = 0; old->defExists(d); ++d)
3767 old->def(d).replace(i->getDef(d), false);
3768 delete_Instruction(prog, old);
3769 *ptr = NULL;
3770 return true;
3771 }
3772
3773 bool
3774 LocalCSE::visit(BasicBlock *bb)
3775 {
3776 unsigned int replaced;
3777
3778 do {
3779 Instruction *ir, *next;
3780
3781 replaced = 0;
3782
3783 // will need to know the order of instructions
3784 int serial = 0;
3785 for (ir = bb->getFirst(); ir; ir = ir->next)
3786 ir->serial = serial++;
3787
3788 for (ir = bb->getFirst(); ir; ir = next) {
3789 int s;
3790 Value *src = NULL;
3791
3792 next = ir->next;
3793
3794 if (ir->fixed) {
3795 ops[ir->op].insert(ir);
3796 continue;
3797 }
3798
3799 for (s = 0; ir->srcExists(s); ++s)
3800 if (ir->getSrc(s)->asLValue())
3801 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3802 src = ir->getSrc(s);
3803
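      // Scan the use list of the least-referenced source: any equivalent
      // earlier instruction in this BB must also appear there, and picking
      // the shortest list keeps the quadratic search cheap.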
3804 if (src) {
3805 for (Value::UseIterator it = src->uses.begin();
3806 it != src->uses.end(); ++it) {
3807 Instruction *ik = (*it)->getInsn();
3808 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3809 if (tryReplace(&ir, ik))
3810 break;
3811 }
3812 } else {
3813 DLLIST_FOR_EACH(&ops[ir->op], iter)
3814 {
3815 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3816 if (tryReplace(&ir, ik))
3817 break;
3818 }
3819 }
3820
3821 if (ir)
3822 ops[ir->op].insert(ir);
3823 else
3824 ++replaced;
3825 }
3826 for (unsigned int i = 0; i <= OP_LAST; ++i)
3827 ops[i].clear();
3828
3829 } while (replaced);
3830
3831 return true;
3832 }
3833
3834 // =============================================================================
3835
3836 // Remove computations of unused values.
3837 class DeadCodeElim : public Pass
3838 {
3839 public:
3840 bool buryAll(Program *);
3841
3842 private:
3843 virtual bool visit(BasicBlock *);
3844
3845 void checkSplitLoad(Instruction *ld); // for partially dead loads
3846
3847 unsigned int deadCount;
3848 };
3849
3850 bool
3851 DeadCodeElim::buryAll(Program *prog)
3852 {
3853 do {
3854 deadCount = 0;
3855 if (!this->run(prog, false, false))
3856 return false;
3857 } while (deadCount);
3858
3859 return true;
3860 }
3861
3862 bool
3863 DeadCodeElim::visit(BasicBlock *bb)
3864 {
3865 Instruction *prev;
3866
3867 for (Instruction *i = bb->getExit(); i; i = prev) {
3868 prev = i->prev;
3869 if (i->isDead()) {
3870 ++deadCount;
3871 delete_Instruction(prog, i);
3872 } else
3873 if (i->defExists(1) &&
3874 i->subOp == 0 &&
3875 (i->op == OP_VFETCH || i->op == OP_LOAD)) {
3876 checkSplitLoad(i);
3877 } else
3878 if (i->defExists(0) && !i->getDef(0)->refCount()) {
3879 if (i->op == OP_ATOM ||
3880 i->op == OP_SUREDP ||
3881 i->op == OP_SUREDB) {
3882 i->setDef(0, NULL);
3883 if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
3884 i->cache = CACHE_CV;
3885 i->op = OP_STORE;
3886 i->subOp = 0;
3887 }
3888 } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
3889 i->setDef(0, i->getDef(1));
3890 i->setDef(1, NULL);
3891 }
3892 }
3893 }
3894 return true;
3895 }
3896
3897 // Each load can go into up to 4 destinations, any of which might potentially
3898 // be dead (i.e. a hole). These can always be split into 2 loads, independent
3899 // of where the holes are. We find the first contiguous region, put it into
3900 // the first load, and then put the second contiguous region into the second
3901 // load. There can be at most 2 contiguous regions.
3902 //
3903 // Note that there are some restrictions, for example it's not possible to do
3904 // a 64-bit load that's not 64-bit aligned, so such a load has to be split
3905 // up. Also hardware doesn't support 96-bit loads, so those also have to be
3906 // split into a 64-bit and 32-bit load.
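// Illustrative example: a 128-bit load where only components 0 and 3 are
// live becomes one 32-bit load at +0x0 and a second one at +0xc.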
3907 void
3908 DeadCodeElim::checkSplitLoad(Instruction *ld1)
3909 {
3910 Instruction *ld2 = NULL; // can get at most 2 loads
3911 Value *def1[4];
3912 Value *def2[4];
3913 int32_t addr1, addr2;
3914 int32_t size1, size2;
3915 int d, n1, n2;
3916 uint32_t mask = 0xffffffff;
3917
3918 for (d = 0; ld1->defExists(d); ++d)
3919 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
3920 mask &= ~(1 << d);
3921 if (mask == 0xffffffff)
3922 return;
3923
3924 addr1 = ld1->getSrc(0)->reg.data.offset;
3925 n1 = n2 = 0;
3926 size1 = size2 = 0;
3927
3928 // Compute address/width for first load
3929 for (d = 0; ld1->defExists(d); ++d) {
3930 if (mask & (1 << d)) {
3931 if (size1 && (addr1 & 0x7))
3932 break;
3933 def1[n1] = ld1->getDef(d);
3934 size1 += def1[n1++]->reg.size;
3935 } else
3936 if (!n1) {
3937 addr1 += ld1->getDef(d)->reg.size;
3938 } else {
3939 break;
3940 }
3941 }
3942
3943 // Scale back the size of the first load until it can be loaded. This
3944 // typically happens for TYPE_B96 loads.
3945 while (n1 &&
3946 !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
3947 typeOfSize(size1))) {
3948 size1 -= def1[--n1]->reg.size;
3949 d--;
3950 }
3951
3952 // Compute address/width for second load
3953 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
3954 if (mask & (1 << d)) {
3955 assert(!size2 || !(addr2 & 0x7));
3956 def2[n2] = ld1->getDef(d);
3957 size2 += def2[n2++]->reg.size;
3958       } else if (!n2) {
3959          addr2 += ld1->getDef(d)->reg.size;
3961 } else {
3962 break;
3963 }
3964 }
3965
3966 // Make sure that we've processed all the values
3967 for (; ld1->defExists(d); ++d)
3968 assert(!(mask & (1 << d)));
3969
3970 updateLdStOffset(ld1, addr1, func);
3971 ld1->setType(typeOfSize(size1));
3972 for (d = 0; d < 4; ++d)
3973 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
3974
3975 if (!n2)
3976 return;
3977
3978 ld2 = cloneShallow(func, ld1);
3979 updateLdStOffset(ld2, addr2, func);
3980 ld2->setType(typeOfSize(size2));
3981 for (d = 0; d < 4; ++d)
3982 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
3983
3984 ld1->bb->insertAfter(ld1, ld2);
3985 }
3986
3987 // =============================================================================
3988
3989 #define RUN_PASS(l, n, f) \
3990 if (level >= (l)) { \
3991 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
3992 INFO("PEEPHOLE: %s\n", #n); \
3993 n pass; \
3994 if (!pass.f(this)) \
3995 return false; \
3996 }
3997
3998 bool
3999 Program::optimizeSSA(int level)
4000 {
4001 RUN_PASS(1, DeadCodeElim, buryAll);
4002 RUN_PASS(1, CopyPropagation, run);
4003 RUN_PASS(1, MergeSplits, run);
4004 RUN_PASS(2, GlobalCSE, run);
4005 RUN_PASS(1, LocalCSE, run);
4006 RUN_PASS(2, AlgebraicOpt, run);
4007    RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
4008 RUN_PASS(1, ConstantFolding, foldAll);
4009 RUN_PASS(0, Split64BitOpPreRA, run);
4010 RUN_PASS(2, LateAlgebraicOpt, run);
4011 RUN_PASS(1, LoadPropagation, run);
4012 RUN_PASS(1, IndirectPropagation, run);
4013 RUN_PASS(2, MemoryOpt, run);
4014 RUN_PASS(2, LocalCSE, run);
4015 RUN_PASS(0, DeadCodeElim, buryAll);
4016
4017 return true;
4018 }
4019
4020 bool
4021 Program::optimizePostRA(int level)
4022 {
4023 RUN_PASS(2, FlatteningPass, run);
4024 RUN_PASS(2, PostRaLoadPropagation, run);
4025
4026 return true;
4027 }
4028
4029 }