nv50/ir: do not try to attach JOIN ops to ATOM
[mesa.git] src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
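   // Atomics have memory side effects, so they are never no-ops even if
   // their result is unused.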
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

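// An instruction is dead if it has no side effects (stores, exports, atomics,
// surface writes) and none of its results are referenced or already assigned
// a register.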
bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

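// Fold loads of constants, immediates, attributes, and shared memory directly
// into the instructions that use them, where the target supports it.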
class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way,
   // optimistically, the load can eventually be removed entirely.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

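// Fold simple address arithmetic on an indirect source (ADD/SUB with an
// immediate, or a MOV of an immediate) into the instruction's constant
// offset, provided the target can encode the resulting offset.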
bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes MOVs will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
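      // The condition (src2) is not an immediate here, so we can only fold
      // when both selectable values are the same.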
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
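      // src1 packs the bitfield as (width << 8) | offset.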
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
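      // The two-source POPCNT counts the set bits of (src0 & src1).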
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. It still might not be, as there may be other restrictions,
      // so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
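            // mul_hi(x, 2^k) == x >> (32 - k)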
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
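         // x * 2 -> x + x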
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      }
      break;
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
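         // Unsigned division by an invariant constant: multiply by a
         // precomputed reciprocal and fix the result up with a
         // subtract/shift/add sequence (cf. Granlund & Montgomery,
         // "Division by Invariant Integers using Multiplication").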
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
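         // Signed division by a constant: mul_hi with a magic number plus
         // the dividend, arithmetic shift, then subtract the sign compare
         // (integer SET yields 0/-1, so this adds 1 for negative dividends
         // to round toward zero); negate the result if the divisor is
         // negative.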
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
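         // x % 2^k == x & (2^k - 1)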
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return;
         if (imm0.reg.data.f32 != 1.0)
            return;
         if (cmp->dType != TYPE_U32)
            return;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return;
         if (si->src(!adds).mod != Modifier(0))
            return;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b)
         // propagate the add bit into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
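      // BFIND returns the bit position of the most significant set bit
      // (-1 for 0); the SAMT subOp returns the shift amount, 31 - pos.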
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return;
   }
   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);

   BuildUtil bld;
};

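// Replace ABS(SUB(a, b)) (or ABS(ADD(a, NEG(b)))) with SAD(a, b, 0) where
// the target supports SAD.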
void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
1584 abs->moveSources(1, 2); // move sources >=1 up by 2
1585 abs->op = OP_SAD;
1586 abs->setType(sub->dType);
1587 abs->setSrc(0, src0);
1588 abs->setSrc(1, src1);
1589 bld.setPosition(abs, false);
1590 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1591 }
1592
1593 bool
1594 AlgebraicOpt::handleADD(Instruction *add)
1595 {
1596 Value *src0 = add->getSrc(0);
1597 Value *src1 = add->getSrc(1);
1598
1599 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1600 return false;
1601
1602 bool changed = false;
1603 if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1604 changed = tryADDToMADOrSAD(add, OP_MAD);
1605 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1606 changed = tryADDToMADOrSAD(add, OP_SAD);
1607 return changed;
1608 }
1609
1610 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1611 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1612 bool
1613 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1614 {
1615 Value *src0 = add->getSrc(0);
1616 Value *src1 = add->getSrc(1);
1617 Value *src;
1618 int s;
1619 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1620 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1621 Modifier mod[4];
1622
1623 if (src0->refCount() == 1 &&
1624 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1625 s = 0;
1626 else
1627 if (src1->refCount() == 1 &&
1628 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1629 s = 1;
1630 else
1631 return false;
1632
1633 src = add->getSrc(s);
1634
1635 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1636 return false;
1637
1638 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1639 src->getInsn()->dnz)
1640 return false;
1641
1642 if (toOp == OP_SAD) {
1643 ImmediateValue imm;
1644 if (!src->getInsn()->src(2).getImmediate(imm))
1645 return false;
1646 if (!imm.isInteger(0))
1647 return false;
1648 }
1649
1650 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1651 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1652 return false;
1653
1654 mod[0] = add->src(0).mod;
1655 mod[1] = add->src(1).mod;
1656 mod[2] = src->getUniqueInsn()->src(0).mod;
1657 mod[3] = src->getUniqueInsn()->src(1).mod;
1658
1659 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1660 return false;
1661
1662 add->op = toOp;
1663 add->subOp = src->getInsn()->subOp; // potentially mul-high
1664 add->dType = src->getInsn()->dType; // sign matters for imad hi
1665 add->sType = src->getInsn()->sType;
1666
1667 add->setSrc(2, add->src(s ? 0 : 1));
1668
1669 add->setSrc(0, src->getInsn()->getSrc(0));
1670 add->src(0).mod = mod[2] ^ mod[s];
1671 add->setSrc(1, src->getInsn()->getSrc(1));
1672 add->src(1).mod = mod[3];
1673
1674 return true;
1675 }
1676
1677 void
1678 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1679 {
1680 Value *src0 = minmax->getSrc(0);
1681 Value *src1 = minmax->getSrc(1);
1682
1683 if (src0 != src1 || src0->reg.file != FILE_GPR)
1684 return;
1685 if (minmax->src(0).mod == minmax->src(1).mod) {
1686 if (minmax->def(0).mayReplace(minmax->src(0))) {
1687 minmax->def(0).replace(minmax->src(0), false);
1688 minmax->bb->remove(minmax);
1689 } else {
1690 minmax->op = OP_CVT;
1691 minmax->setSrc(1, NULL);
1692 }
1693 } else {
1694 // TODO:
1695 // min(x, -x) = -abs(x)
1696 // min(x, -abs(x)) = -abs(x)
1697 // min(x, abs(x)) = x
1698 // max(x, -abs(x)) = x
1699 // max(x, abs(x)) = abs(x)
1700 // max(x, -x) = abs(x)
1701 }
1702 }
1703
1704 void
1705 AlgebraicOpt::handleRCP(Instruction *rcp)
1706 {
1707 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1708
1709 if (si && si->op == OP_RCP) {
1710 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1711 rcp->op = mod.getOp();
1712 rcp->setSrc(0, si->getSrc(0));
1713 }
1714 }
1715
1716 void
1717 AlgebraicOpt::handleSLCT(Instruction *slct)
1718 {
1719 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1720 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1721 slct->setSrc(0, slct->getSrc(1));
1722 } else
1723 if (slct->getSrc(0) != slct->getSrc(1)) {
1724 return;
1725 }
1726 slct->op = OP_MOV;
1727 slct->setSrc(1, NULL);
1728 slct->setSrc(2, NULL);
1729 }
1730
1731 void
1732 AlgebraicOpt::handleLOGOP(Instruction *logop)
1733 {
1734 Value *src0 = logop->getSrc(0);
1735 Value *src1 = logop->getSrc(1);
1736
1737 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1738 return;
1739
1740 if (src0 == src1) {
1741 if ((logop->op == OP_AND || logop->op == OP_OR) &&
1742 logop->def(0).mayReplace(logop->src(0))) {
1743 logop->def(0).replace(logop->src(0), false);
1744 delete_Instruction(prog, logop);
1745 }
1746 } else {
1747 // try AND(SET, SET) -> SET_AND(SET)
1748 Instruction *set0 = src0->getInsn();
1749 Instruction *set1 = src1->getInsn();
1750
1751 if (!set0 || set0->fixed || !set1 || set1->fixed)
1752 return;
1753 if (set1->op != OP_SET) {
1754 Instruction *xchg = set0;
1755 set0 = set1;
1756 set1 = xchg;
1757 if (set1->op != OP_SET)
1758 return;
1759 }
1760 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
1761 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
1762 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
1763 return;
1764 if (set0->op != OP_SET &&
1765 set0->op != OP_SET_AND &&
1766 set0->op != OP_SET_OR &&
1767 set0->op != OP_SET_XOR)
1768 return;
1769 if (set0->getDef(0)->refCount() > 1 &&
1770 set1->getDef(0)->refCount() > 1)
1771 return;
1772 if (set0->getPredicate() || set1->getPredicate())
1773 return;
1774 // check that they don't source each other
1775 for (int s = 0; s < 2; ++s)
1776 if (set0->getSrc(s) == set1->getDef(0) ||
1777 set1->getSrc(s) == set0->getDef(0))
1778 return;
1779
1780 set0 = cloneForward(func, set0);
1781 set1 = cloneShallow(func, set1);
1782 logop->bb->insertAfter(logop, set1);
1783 logop->bb->insertAfter(logop, set0);
1784
1785 set0->dType = TYPE_U8;
1786 set0->getDef(0)->reg.file = FILE_PREDICATE;
1787 set0->getDef(0)->reg.size = 1;
1788 set1->setSrc(2, set0->getDef(0));
1789 set1->op = redOp;
1790 set1->setDef(0, logop->getDef(0));
1791 delete_Instruction(prog, logop);
1792 }
1793 }
1794
1795 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
1796 // nv50:
1797 // F2I(NEG(I2F(ABS(SET))))
1798 void
1799 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
1800 {
1801 Instruction *insn = cvt->getSrc(0)->getInsn();
1802 if (cvt->sType != TYPE_F32 ||
1803 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
1804 return;
1805 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
1806 return;
1807 if (insn->src(0).mod != Modifier(0))
1808 return;
1809 insn = insn->getSrc(0)->getInsn();
1810
1811 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
1812 if (insn && insn->op == OP_CVT &&
1813 insn->dType == TYPE_F32 &&
1814 insn->sType == TYPE_S32) {
1815 insn = insn->getSrc(0)->getInsn();
1816 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
1817 insn->src(0).mod)
1818 return;
1819 insn = insn->getSrc(0)->getInsn();
1820 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
1821 return;
1822 } else
1823 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
1824 return;
1825 }
1826
1827 Instruction *bset = cloneShallow(func, insn);
1828 bset->dType = TYPE_U32;
1829 bset->setDef(0, cvt->getDef(0));
1830 cvt->bb->insertAfter(cvt, bset);
1831 delete_Instruction(prog, cvt);
1832 }
1833
1834 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
1835 // does a type conversion, this becomes trickier as there might be range
1836 // changes/etc. We could handle those in theory as long as the range was being
1837 // reduced or kept the same.
1838 void
1839 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
1840 {
1841 Instruction *insn = cvt->getSrc(0)->getInsn();
1842 RoundMode rnd = insn->rnd;
1843
1844 if (insn->saturate ||
1845 insn->subOp ||
1846 insn->dType != insn->sType ||
1847 insn->dType != cvt->sType)
1848 return;
1849
1850 switch (insn->op) {
1851 case OP_CEIL:
1852 rnd = ROUND_PI;
1853 break;
1854 case OP_FLOOR:
1855 rnd = ROUND_MI;
1856 break;
1857 case OP_TRUNC:
1858 rnd = ROUND_ZI;
1859 break;
1860 case OP_CVT:
1861 break;
1862 default:
1863 return;
1864 }
1865
1866 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
1867 rnd = (RoundMode)(rnd & 3);
1868
1869 cvt->rnd = rnd;
1870 cvt->setSrc(0, insn->getSrc(0));
1871 cvt->src(0).mod *= insn->src(0).mod;
1872 cvt->sType = insn->sType;
1873 }
1874
1875 // Some shaders extract packed bytes out of words and convert them to
1876 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
1877 // nv50 for word sizes.
1878 //
1879 // CVT(EXTBF(x, byte/word))
1880 // CVT(AND(bytemask, x))
1881 // CVT(AND(bytemask, SHR(x, 8/16/24)))
1882 // CVT(SHR(x, 16/24))
1883 void
1884 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
1885 {
1886 Instruction *insn = cvt->getSrc(0)->getInsn();
1887 ImmediateValue imm;
1888 Value *arg = NULL;
1889 unsigned width, offset;
1890 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
1891 return;
1892 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
1893 width = (imm.reg.data.u32 >> 8) & 0xff;
1894 offset = imm.reg.data.u32 & 0xff;
1895 arg = insn->getSrc(0);
1896
1897 if (width != 8 && width != 16)
1898 return;
1899 if (width == 8 && offset & 0x7)
1900 return;
1901 if (width == 16 && offset & 0xf)
1902 return;
1903 } else if (insn->op == OP_AND) {
1904 int s;
1905 if (insn->src(0).getImmediate(imm))
1906 s = 0;
1907 else if (insn->src(1).getImmediate(imm))
1908 s = 1;
1909 else
1910 return;
1911
1912 if (imm.reg.data.u32 == 0xff)
1913 width = 8;
1914 else if (imm.reg.data.u32 == 0xffff)
1915 width = 16;
1916 else
1917 return;
1918
1919 arg = insn->getSrc(!s);
1920 Instruction *shift = arg->getInsn();
1921 offset = 0;
1922 if (shift && shift->op == OP_SHR &&
1923 shift->sType == cvt->sType &&
1924 shift->src(1).getImmediate(imm) &&
1925 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
1926 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
1927 arg = shift->getSrc(0);
1928 offset = imm.reg.data.u32;
1929 }
1930 // We just AND'd the high bits away, which means this is effectively an
1931 // unsigned value.
1932 cvt->sType = TYPE_U32;
1933 } else if (insn->op == OP_SHR &&
1934 insn->sType == cvt->sType &&
1935 insn->src(1).getImmediate(imm)) {
1936 arg = insn->getSrc(0);
1937 if (imm.reg.data.u32 == 24) {
1938 width = 8;
1939 offset = 24;
1940 } else if (imm.reg.data.u32 == 16) {
1941 width = 16;
1942 offset = 16;
1943 } else {
1944 return;
1945 }
1946 }
1947
1948 if (!arg)
1949 return;
1950
1951 // Irrespective of what came earlier, we can undo a shift on the argument
1952 // by adjusting the offset.
1953 Instruction *shift = arg->getInsn();
1954 if (shift && shift->op == OP_SHL &&
1955 shift->src(1).getImmediate(imm) &&
1956 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
1957 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
1958 imm.reg.data.u32 <= offset) {
1959 arg = shift->getSrc(0);
1960 offset -= imm.reg.data.u32;
1961 }
1962
1963 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
1964 // annoying to detect them.
1965
1966 if (width == 8) {
1967 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
1968 } else {
1969 assert(width == 16);
1970 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
1971 }
1972 cvt->setSrc(0, arg);
1973 cvt->subOp = offset >> 3;
1974 }
1975
1976 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
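// E.g. (hypothetical IR):
//   add u32 %r1, %r0, 0x4
//   suclamp %r2, %r1, %len, 0
// -> suclamp %r2, %r0, %len, 0x4   (0x4 fits into the signed 6-bit field)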
1977 void
1978 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
1979 {
1980 ImmediateValue imm;
1981 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
1982 int s;
1983 Instruction *add;
1984
1985 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
1986
1987 // look for ADD (TODO: only count references by non-SUCLAMP)
1988 if (insn->getSrc(0)->refCount() > 1)
1989 return;
1990 add = insn->getSrc(0)->getInsn();
1991 if (!add || add->op != OP_ADD ||
1992 (add->dType != TYPE_U32 &&
1993 add->dType != TYPE_S32))
1994 return;
1995
1996 // look for immediate
1997 for (s = 0; s < 2; ++s)
1998 if (add->src(s).getImmediate(imm))
1999 break;
2000 if (s >= 2)
2001 return;
2002 s = s ? 0 : 1;
2003 // determine if immediate fits
2004 val += imm.reg.data.s32;
2005 if (val > 31 || val < -32)
2006 return;
2007 // determine if other addend fits
2008 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2009 return;
2010
2011 bld.setPosition(insn, false); // make sure bld is init'ed
2012 // replace sources
2013 insn->setSrc(2, bld.mkImm(val));
2014 insn->setSrc(0, add->getSrc(s));
2015 }
2016
2017 // NEG(AND(SET, 1)) -> SET
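// An integer SET writes 0xffffffff when true; AND-ing with 1 gives 0 or 1,
// and negating that yields 0 or -1 again, i.e. the original SET result.
// E.g. (hypothetical IR):
//   set u32 %r1, lt, %a, %b
//   and u32 %r2, %r1, 0x1
//   neg s32 %r3, %r2
// -> uses of %r3 can simply be redirected to %r1.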
2018 void
2019 AlgebraicOpt::handleNEG(Instruction *i) {
2020 Instruction *src = i->getSrc(0)->getInsn();
2021 ImmediateValue imm;
2022 int b;
2023
2024 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2025 return;
2026
2027 if (src->src(0).getImmediate(imm))
2028 b = 1;
2029 else if (src->src(1).getImmediate(imm))
2030 b = 0;
2031 else
2032 return;
2033
2034 if (!imm.isInteger(1))
2035 return;
2036
2037 Instruction *set = src->getSrc(b)->getInsn(); // may be NULL (e.g. input)
2038 if (set && (set->op == OP_SET || set->op == OP_SET_AND ||
2039 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2040 !isFloatType(set->dType)) {
2041 i->def(0).replace(set->getDef(0), false);
2042 }
2043 }
2044
2045 bool
2046 AlgebraicOpt::visit(BasicBlock *bb)
2047 {
2048 Instruction *next;
2049 for (Instruction *i = bb->getEntry(); i; i = next) {
2050 next = i->next;
2051 switch (i->op) {
2052 case OP_ABS:
2053 handleABS(i);
2054 break;
2055 case OP_ADD:
2056 handleADD(i);
2057 break;
2058 case OP_RCP:
2059 handleRCP(i);
2060 break;
2061 case OP_MIN:
2062 case OP_MAX:
2063 handleMINMAX(i);
2064 break;
2065 case OP_SLCT:
2066 handleSLCT(i);
2067 break;
2068 case OP_AND:
2069 case OP_OR:
2070 case OP_XOR:
2071 handleLOGOP(i);
2072 break;
2073 case OP_CVT:
2074 handleCVT_NEG(i);
2075 handleCVT_CVT(i);
2076 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2077 handleCVT_EXTBF(i);
2078 break;
2079 case OP_SUCLAMP:
2080 handleSUCLAMP(i);
2081 break;
2082 case OP_NEG:
2083 handleNEG(i);
2084 break;
2085 default:
2086 break;
2087 }
2088 }
2089
2090 return true;
2091 }
2092
2093 // =============================================================================
2094
2095 static inline void
2096 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2097 {
2098 if (offset != ldst->getSrc(0)->reg.data.offset) {
2099 if (ldst->getSrc(0)->refCount() > 1)
2100 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2101 ldst->getSrc(0)->reg.data.offset = offset;
2102 }
2103 }
2104
2105 // Combine loads and stores, forward stores to loads where possible.
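// E.g. two adjacent 32-bit loads (hypothetical IR)
//   ld u32 %r0, c0[0x10]
//   ld u32 %r1, c0[0x14]
// can be merged into a single
//   ld u64 { %r0 %r1 }, c0[0x10]
// and a load that reads back a just-stored value can reuse the stored
// register directly instead of accessing memory again.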
2106 class MemoryOpt : public Pass
2107 {
2108 private:
2109 class Record
2110 {
2111 public:
2112 Record *next;
2113 Instruction *insn;
2114 const Value *rel[2];
2115 const Value *base;
2116 int32_t offset;
2117 int8_t fileIndex;
2118 uint8_t size;
2119 bool locked;
2120 Record *prev;
2121
2122 bool overlaps(const Instruction *ldst) const;
2123
2124 inline void link(Record **);
2125 inline void unlink(Record **);
2126 inline void set(const Instruction *ldst);
2127 };
2128
2129 public:
2130 MemoryOpt();
2131
2132 Record *loads[DATA_FILE_COUNT];
2133 Record *stores[DATA_FILE_COUNT];
2134
2135 MemoryPool recordPool;
2136
2137 private:
2138 virtual bool visit(BasicBlock *);
2139 bool runOpt(BasicBlock *);
2140
2141 Record **getList(const Instruction *);
2142
2143 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2144
2145 // merge @insn into load/store instruction from @rec
2146 bool combineLd(Record *rec, Instruction *ld);
2147 bool combineSt(Record *rec, Instruction *st);
2148
2149 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2150 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2151 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2152
2153 void addRecord(Instruction *ldst);
2154 void purgeRecords(Instruction *const st, DataFile);
2155 void lockStores(Instruction *const ld);
2156 void reset();
2157
2158 private:
2159 Record *prevRecord;
2160 };
2161
2162 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2163 {
2164 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2165 loads[i] = NULL;
2166 stores[i] = NULL;
2167 }
2168 prevRecord = NULL;
2169 }
2170
2171 void
2172 MemoryOpt::reset()
2173 {
2174 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2175 Record *it, *next;
2176 for (it = loads[i]; it; it = next) {
2177 next = it->next;
2178 recordPool.release(it);
2179 }
2180 loads[i] = NULL;
2181 for (it = stores[i]; it; it = next) {
2182 next = it->next;
2183 recordPool.release(it);
2184 }
2185 stores[i] = NULL;
2186 }
2187 }
2188
2189 bool
2190 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2191 {
2192 int32_t offRc = rec->offset;
2193 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2194 int sizeRc = rec->size;
2195 int sizeLd = typeSizeof(ld->dType);
2196 int size = sizeRc + sizeLd;
2197 int d, j;
2198
2199 if (!prog->getTarget()->
2200 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2201 return false;
2202 // no unaligned loads
2203 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2204 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2205 return false;
2206
2207 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2208
2209 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2210
2211 if (offLd < offRc) {
2212 int sz;
2213 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2214 // d: nr of definitions in ld
2215 // j: nr of definitions in rec->insn, move:
2216 for (d = d + j - 1; j > 0; --j, --d)
2217 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2218
2219 if (rec->insn->getSrc(0)->refCount() > 1)
2220 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2221 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2222
2223 d = 0;
2224 } else {
2225 d = j;
2226 }
2227 // move definitions of @ld to @rec->insn
2228 for (j = 0; sizeLd; ++j, ++d) {
2229 sizeLd -= ld->getDef(j)->reg.size;
2230 rec->insn->setDef(d, ld->getDef(j));
2231 }
2232
2233 rec->size = size;
2234 rec->insn->getSrc(0)->reg.size = size;
2235 rec->insn->setType(typeOfSize(size));
2236
2237 delete_Instruction(prog, ld);
2238
2239 return true;
2240 }
2241
2242 bool
2243 MemoryOpt::combineSt(Record *rec, Instruction *st)
2244 {
2245 int32_t offRc = rec->offset;
2246 int32_t offSt = st->getSrc(0)->reg.data.offset;
2247 int sizeRc = rec->size;
2248 int sizeSt = typeSizeof(st->dType);
2249 int s = sizeSt / 4;
2250 int size = sizeRc + sizeSt;
2251 int j, k;
2252 Value *src[4]; // no modifiers in ValueRef allowed for st
2253 Value *extra[3];
2254
2255 if (!prog->getTarget()->
2256 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2257 return false;
2258 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2259 return false;
2260
2261 st->takeExtraSources(0, extra); // save predicate and indirect address
2262
2263 if (offRc < offSt) {
2264 // save values from @st
2265 for (s = 0; sizeSt; ++s) {
2266 sizeSt -= st->getSrc(s + 1)->reg.size;
2267 src[s] = st->getSrc(s + 1);
2268 }
2269 // set record's values as low sources of @st
2270 for (j = 1; sizeRc; ++j) {
2271 sizeRc -= rec->insn->getSrc(j)->reg.size;
2272 st->setSrc(j, rec->insn->getSrc(j));
2273 }
2274 // set saved values as high sources of @st
2275 for (k = j, j = 0; j < s; ++j)
2276 st->setSrc(k++, src[j]);
2277
2278 updateLdStOffset(st, offRc, func);
2279 } else {
2280 for (j = 1; sizeSt; ++j)
2281 sizeSt -= st->getSrc(j)->reg.size;
2282 for (s = 1; sizeRc; ++j, ++s) {
2283 sizeRc -= rec->insn->getSrc(s)->reg.size;
2284 st->setSrc(j, rec->insn->getSrc(s));
2285 }
2286 rec->offset = offSt;
2287 }
2288 st->putExtraSources(0, extra); // restore pointer and predicate
2289
2290 delete_Instruction(prog, rec->insn);
2291 rec->insn = st;
2292 rec->size = size;
2293 rec->insn->getSrc(0)->reg.size = size;
2294 rec->insn->setType(typeOfSize(size));
2295 return true;
2296 }
2297
2298 void
2299 MemoryOpt::Record::set(const Instruction *ldst)
2300 {
2301 const Symbol *mem = ldst->getSrc(0)->asSym();
2302 fileIndex = mem->reg.fileIndex;
2303 rel[0] = ldst->getIndirect(0, 0);
2304 rel[1] = ldst->getIndirect(0, 1);
2305 offset = mem->reg.data.offset;
2306 base = mem->getBase();
2307 size = typeSizeof(ldst->sType);
2308 }
2309
2310 void
2311 MemoryOpt::Record::link(Record **list)
2312 {
2313 next = *list;
2314 if (next)
2315 next->prev = this;
2316 prev = NULL;
2317 *list = this;
2318 }
2319
2320 void
2321 MemoryOpt::Record::unlink(Record **list)
2322 {
2323 if (next)
2324 next->prev = prev;
2325 if (prev)
2326 prev->next = next;
2327 else
2328 *list = next;
2329 }
2330
2331 MemoryOpt::Record **
2332 MemoryOpt::getList(const Instruction *insn)
2333 {
2334 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2335 return &loads[insn->src(0).getFile()];
2336 return &stores[insn->src(0).getFile()];
2337 }
2338
2339 void
2340 MemoryOpt::addRecord(Instruction *i)
2341 {
2342 Record **list = getList(i);
2343 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2344
2345 it->link(list);
2346 it->set(i);
2347 it->insn = i;
2348 it->locked = false;
2349 }
2350
2351 MemoryOpt::Record *
2352 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2353 {
2354 const Symbol *sym = insn->getSrc(0)->asSym();
2355 const int size = typeSizeof(insn->sType);
2356 Record *rec = NULL;
2357 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2358
2359 for (; it; it = it->next) {
2360 if (it->locked && insn->op != OP_LOAD)
2361 continue;
2362 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2363 it->rel[0] != insn->getIndirect(0, 0) ||
2364 it->fileIndex != sym->reg.fileIndex ||
2365 it->rel[1] != insn->getIndirect(0, 1))
2366 continue;
2367
2368 if (it->offset < sym->reg.data.offset) {
2369 if (it->offset + it->size >= sym->reg.data.offset) {
2370 isAdj = (it->offset + it->size == sym->reg.data.offset);
2371 if (!isAdj)
2372 return it;
2373 if (!(it->offset & 0x7))
2374 rec = it;
2375 }
2376 } else {
2377 isAdj = it->offset != sym->reg.data.offset;
2378 if (size <= it->size && !isAdj)
2379 return it;
2380 else
2381 if (!(sym->reg.data.offset & 0x7))
2382 if (it->offset - size <= sym->reg.data.offset)
2383 rec = it;
2384 }
2385 }
2386 return rec;
2387 }
2388
2389 bool
2390 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2391 {
2392 Instruction *st = rec->insn;
2393 int32_t offSt = rec->offset;
2394 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2395 int d, s;
2396
2397 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2398 offSt += st->getSrc(s)->reg.size;
2399 if (offSt != offLd)
2400 return false;
2401
2402 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2403 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
2404 return false;
2405 if (st->getSrc(s)->reg.file != FILE_GPR)
2406 return false;
2407 ld->def(d).replace(st->src(s), false);
2408 }
2409 ld->bb->remove(ld);
2410 return true;
2411 }
2412
2413 bool
2414 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
2415 {
2416 Instruction *ldR = rec->insn;
2417 int32_t offR = rec->offset;
2418 int32_t offE = ldE->getSrc(0)->reg.data.offset;
2419 int dR, dE;
2420
2421 assert(offR <= offE);
2422 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
2423 offR += ldR->getDef(dR)->reg.size;
2424 if (offR != offE)
2425 return false;
2426
2427 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
2428 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
2429 return false;
2430 ldE->def(dE).replace(ldR->getDef(dR), false);
2431 }
2432
2433 delete_Instruction(prog, ldE);
2434 return true;
2435 }
2436
2437 bool
2438 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
2439 {
2440 const Instruction *const ri = rec->insn;
2441 Value *extra[3];
2442
2443 int32_t offS = st->getSrc(0)->reg.data.offset;
2444 int32_t offR = rec->offset;
2445 int32_t endS = offS + typeSizeof(st->dType);
2446 int32_t endR = offR + typeSizeof(ri->dType);
2447
2448 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
2449
2450 st->takeExtraSources(0, extra);
2451
2452 if (offR < offS) {
2453 Value *vals[10];
2454 int s, n;
2455 int k = 0;
2456 // get non-replaced sources of ri
2457 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
2458 vals[k++] = ri->getSrc(s);
2459 n = s;
2460 // get replaced sources of st
2461 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
2462 vals[k++] = st->getSrc(s);
2463 // skip replaced sources of ri
2464 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
2465 // get non-replaced sources after values covered by st
2466 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
2467 vals[k++] = ri->getSrc(s);
2468 assert((unsigned int)k <= Elements(vals));
2469 for (s = 0; s < k; ++s)
2470 st->setSrc(s + 1, vals[s]);
2471 st->setSrc(0, ri->getSrc(0));
2472 } else
2473 if (endR > endS) {
2474 int j, s;
2475 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
2476 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
2477 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
2478 st->setSrc(s++, ri->getSrc(j));
2479 }
2480 st->putExtraSources(0, extra);
2481
2482 delete_Instruction(prog, rec->insn);
2483
2484 rec->insn = st;
2485 rec->offset = st->getSrc(0)->reg.data.offset;
2486
2487 st->setType(typeOfSize(rec->size));
2488
2489 return true;
2490 }
2491
2492 bool
2493 MemoryOpt::Record::overlaps(const Instruction *ldst) const
2494 {
2495 Record that;
2496 that.set(ldst);
2497
2498 if (this->fileIndex != that.fileIndex)
2499 return false;
2500
2501 if (this->rel[0] || that.rel[0])
2502 return this->base == that.base;
2503 return
2504 (this->offset < that.offset + that.size) &&
2505 (this->offset + this->size > that.offset);
2506 }
2507
2508 // Once @ld reads from a store's location, that store must not be
2509 // eliminated just because a later store hits the same location, nor
2510 // may it be merged into such a later store.
2511 // The stored value can, however, still be used to determine the value
2512 // returned by future loads.
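// E.g. (hypothetical sequence):
//   st u32 l[0x0], %r0
//   ld u32 %r1, l[0x0]     <- locks the store record above
//   st u32 l[0x0], %r2     <- must not eliminate or absorb the first store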
2513 void
2514 MemoryOpt::lockStores(Instruction *const ld)
2515 {
2516 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
2517 if (!r->locked && r->overlaps(ld))
2518 r->locked = true;
2519 }
2520
2521 // Prior loads from the location of @st are no longer valid.
2522 // Stores to the location of @st may no longer be used to derive
2523 // the value at it nor be coalesced into later stores.
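// E.g. (hypothetical sequence):
//   ld u32 %r1, g[0x100]
//   st u32 g[0x100], %r2   <- purges the load record above
//   ld u32 %r3, g[0x100]   <- must not be replaced by %r1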
2524 void
2525 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
2526 {
2527 if (st)
2528 f = st->src(0).getFile();
2529
2530 for (Record *r = loads[f]; r; r = r->next)
2531 if (!st || r->overlaps(st))
2532 r->unlink(&loads[f]);
2533
2534 for (Record *r = stores[f]; r; r = r->next)
2535 if (!st || r->overlaps(st))
2536 r->unlink(&stores[f]);
2537 }
2538
2539 bool
2540 MemoryOpt::visit(BasicBlock *bb)
2541 {
2542 bool ret = runOpt(bb);
2543 // Run again: a single pass cannot combine four 32-bit ld/st into one
2544 // 128-bit ld/st where 96-bit memory operations are forbidden.
2545 if (ret)
2546 ret = runOpt(bb);
2547 return ret;
2548 }
2549
2550 bool
2551 MemoryOpt::runOpt(BasicBlock *bb)
2552 {
2553 Instruction *ldst, *next;
2554 Record *rec;
2555 bool isAdjacent = true;
2556
2557 for (ldst = bb->getEntry(); ldst; ldst = next) {
2558 bool keep = true;
2559 bool isLoad = true;
2560 next = ldst->next;
2561
2562 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
2563 if (ldst->isDead()) {
2564 // might have been produced by earlier optimization
2565 delete_Instruction(prog, ldst);
2566 continue;
2567 }
2568 } else
2569 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
2570 if (typeSizeof(ldst->dType) == 4 &&
2571 ldst->src(1).getFile() == FILE_GPR &&
2572 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
2573 delete_Instruction(prog, ldst);
2574 continue;
2575 }
2576 isLoad = false;
2577 } else {
2578 // TODO: maybe have all fixed ops act as barrier ?
2579 if (ldst->op == OP_CALL ||
2580 ldst->op == OP_BAR ||
2581 ldst->op == OP_MEMBAR) {
2582 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2583 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2584 purgeRecords(NULL, FILE_MEMORY_SHARED);
2585 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2586 } else
2587 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
2588 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
2589 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2590 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2591 purgeRecords(NULL, FILE_MEMORY_SHARED);
2592 } else {
2593 purgeRecords(NULL, ldst->src(0).getFile());
2594 }
2595 } else
2596 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
2597 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2598 }
2599 continue;
2600 }
2601 if (ldst->getPredicate()) // TODO: handle predicated ld/st
2602 continue;
2603 if (ldst->perPatch) // TODO: create separate per-patch lists
2604 continue;
2605
2606 if (isLoad) {
2607 DataFile file = ldst->src(0).getFile();
2608
2609 // if ld l[]/g[] look for previous store to eliminate the reload
2610 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
2611 // TODO: shared memory ?
2612 rec = findRecord(ldst, false, isAdjacent);
2613 if (rec && !isAdjacent)
2614 keep = !replaceLdFromSt(ldst, rec);
2615 }
2616
2617 // or look for ld from the same location and replace this one
2618 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
2619 if (rec) {
2620 if (!isAdjacent)
2621 keep = !replaceLdFromLd(ldst, rec);
2622 else
2623 // or combine a previous load with this one
2624 keep = !combineLd(rec, ldst);
2625 }
2626 if (keep)
2627 lockStores(ldst);
2628 } else {
2629 rec = findRecord(ldst, false, isAdjacent);
2630 if (rec) {
2631 if (!isAdjacent)
2632 keep = !replaceStFromSt(ldst, rec);
2633 else
2634 keep = !combineSt(rec, ldst);
2635 }
2636 if (keep)
2637 purgeRecords(ldst, DATA_FILE_COUNT);
2638 }
2639 if (keep)
2640 addRecord(ldst);
2641 }
2642 reset();
2643
2644 return true;
2645 }
2646
2647 // =============================================================================
2648
2649 // Turn control flow into predicated instructions (after register allocation !).
2650 // TODO:
2651 // Could move this to before register allocation on NVC0 and also handle nested
2652 // constructs.
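//
// E.g. a short diamond like (hypothetical code)
//   if (p) x = a; else x = b;
// is flattened into straight-line predicated moves
//   @p  mov %x, %a
//   @!p mov %x, %b
// trading the branches for a few wasted execution slots.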
2653 class FlatteningPass : public Pass
2654 {
2655 private:
2656 virtual bool visit(Function *);
2657 virtual bool visit(BasicBlock *);
2658
2659 bool tryPredicateConditional(BasicBlock *);
2660 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
2661 void tryPropagateBranch(BasicBlock *);
2662 inline bool isConstantCondition(Value *pred);
2663 inline bool mayPredicate(const Instruction *, const Value *pred) const;
2664 inline void removeFlow(Instruction *);
2665
2666 uint8_t gpr_unit;
2667 };
2668
2669 bool
2670 FlatteningPass::isConstantCondition(Value *pred)
2671 {
2672 Instruction *insn = pred->getUniqueInsn();
2673 assert(insn);
2674 if (insn->op != OP_SET || insn->srcExists(2))
2675 return false;
2676
2677 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
2678 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
2679 DataFile file;
2680 if (ld) {
2681 if (ld->op != OP_MOV && ld->op != OP_LOAD)
2682 return false;
2683 if (ld->src(0).isIndirect(0))
2684 return false;
2685 file = ld->src(0).getFile();
2686 } else {
2687 file = insn->src(s).getFile();
2688 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
2689 // in register "units", which can vary between targets.
2690 if (file == FILE_GPR) {
2691 Value *v = insn->getSrc(s);
2692 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
2693 int units = bytes >> gpr_unit;
2694 if (units > prog->maxGPR)
2695 file = FILE_IMMEDIATE;
2696 }
2697 }
2698 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
2699 return false;
2700 }
2701 return true;
2702 }
2703
2704 void
2705 FlatteningPass::removeFlow(Instruction *insn)
2706 {
2707 FlowInstruction *term = insn ? insn->asFlow() : NULL;
2708 if (!term)
2709 return;
2710 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
2711
2712 if (term->op == OP_BRA) {
2713 // TODO: this might get more difficult when we get arbitrary BRAs
2714 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
2715 return;
2716 } else
2717 if (term->op != OP_JOIN)
2718 return;
2719
2720 Value *pred = term->getPredicate();
2721
2722 delete_Instruction(prog, term);
2723
2724 if (pred && pred->refCount() == 0) {
2725 Instruction *pSet = pred->getUniqueInsn();
2726 pred->join->reg.data.id = -1; // deallocate
2727 if (pSet->isDead())
2728 delete_Instruction(prog, pSet);
2729 }
2730 }
2731
2732 void
2733 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
2734 {
2735 for (Instruction *i = bb->getEntry(); i; i = i->next) {
2736 if (i->isNop())
2737 continue;
2738 assert(!i->getPredicate());
2739 i->setPredicate(cc, pred);
2740 }
2741 removeFlow(bb->getExit());
2742 }
2743
2744 bool
2745 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
2746 {
2747 if (insn->isPseudo())
2748 return true;
2749 // TODO: calls where we don't know which registers are modified
2750
2751 if (!prog->getTarget()->mayPredicate(insn, pred))
2752 return false;
2753 for (int d = 0; insn->defExists(d); ++d)
2754 if (insn->getDef(d)->equals(pred))
2755 return false;
2756 return true;
2757 }
2758
2759 // If we jump to BRA/RET/EXIT, replace the jump with it.
2760 // NOTE: We no longer update the CFG here !
2761 //
2762 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
2763 // BB:0
2764 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
2765 // BB1:
2766 // bra BB:3
2767 // BB2:
2768 // ...
2769 // BB3:
2770 // ...
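//
// E.g. if BB:1 consists solely of "bra BB:3" (hypothetical numbering), a
// "bra BB:1" elsewhere is rewritten to "bra BB:3" and, when BB:1 has no
// other predecessors, its branch is removed.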
2771 void
2772 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
2773 {
2774 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
2775 BasicBlock *bf = i->asFlow()->target.bb;
2776
2777 if (bf->getInsnCount() != 1)
2778 continue;
2779
2780 FlowInstruction *bra = i->asFlow();
2781 FlowInstruction *rep = bf->getExit()->asFlow();
2782
2783 if (!rep || rep->getPredicate())
2784 continue;
2785 if (rep->op != OP_BRA &&
2786 rep->op != OP_JOIN &&
2787 rep->op != OP_EXIT)
2788 continue;
2789
2790 // TODO: If there are multiple branches to @rep, only the first would
2791 // be replaced, so only remove them after this pass is done ?
2792 // Also, need to check all incident blocks for fall-through exits and
2793 // add the branch there.
2794 bra->op = rep->op;
2795 bra->target.bb = rep->target.bb;
2796 if (bf->cfg.incidentCount() == 1)
2797 bf->remove(rep);
2798 }
2799 }
2800
2801 bool
2802 FlatteningPass::visit(Function *fn)
2803 {
2804 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
2805
2806 return true;
2807 }
2808
2809 bool
2810 FlatteningPass::visit(BasicBlock *bb)
2811 {
2812 if (tryPredicateConditional(bb))
2813 return true;
2814
2815 // try to attach join to previous instruction
2816 if (prog->getTarget()->hasJoin) {
2817 Instruction *insn = bb->getExit();
2818 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
2819 insn = insn->prev;
2820 if (insn && !insn->getPredicate() &&
2821 !insn->asFlow() &&
2822 insn->op != OP_TEXBAR &&
2823 !isTextureOp(insn->op) && // probably just nve4
2824 !isSurfaceOp(insn->op) && // not confirmed
2825 insn->op != OP_LINTERP && // probably just nve4
2826 insn->op != OP_PINTERP && // probably just nve4
2827 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
2828 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
2829 !insn->isNop()) {
2830 insn->join = 1;
2831 bb->remove(bb->getExit());
2832 return true;
2833 }
2834 }
2835 }
2836
2837 tryPropagateBranch(bb);
2838
2839 return true;
2840 }
2841
2842 bool
2843 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
2844 {
2845 BasicBlock *bL = NULL, *bR = NULL;
2846 unsigned int nL = 0, nR = 0, limit = 12;
2847 Instruction *insn;
2848 unsigned int mask;
2849
2850 mask = bb->initiatesSimpleConditional();
2851 if (!mask)
2852 return false;
2853
2854 assert(bb->getExit());
2855 Value *pred = bb->getExit()->getPredicate();
2856 assert(pred);
2857
2858 if (isConstantCondition(pred))
2859 limit = 4;
2860
2861 Graph::EdgeIterator ei = bb->cfg.outgoing();
2862
2863 if (mask & 1) {
2864 bL = BasicBlock::get(ei.getNode());
2865 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
2866 if (!mayPredicate(insn, pred))
2867 return false;
2868 if (nL > limit)
2869 return false; // too long, do a real branch
2870 }
2871 ei.next();
2872
2873 if (mask & 2) {
2874 bR = BasicBlock::get(ei.getNode());
2875 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
2876 if (!mayPredicate(insn, pred))
2877 return false;
2878 if (nR > limit)
2879 return false; // too long, do a real branch
2880 }
2881
2882 if (bL)
2883 predicateInstructions(bL, pred, bb->getExit()->cc);
2884 if (bR)
2885 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
2886
2887 if (bb->joinAt) {
2888 bb->remove(bb->joinAt);
2889 bb->joinAt = NULL;
2890 }
2891 removeFlow(bb->getExit()); // delete the branch/join at the fork point
2892
2893 // remove potential join operations at the end of the conditional
2894 if (prog->getTarget()->joinAnterior) {
2895 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
2896 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
2897 removeFlow(bb->getEntry());
2898 }
2899
2900 return true;
2901 }
2902
2903 // =============================================================================
2904
2905 // Fold Immediate into MAD; must be done after register allocation due to
2906 // constraint SDST == SSRC2
2907 // TODO:
2908 // Does NVC0+ have other situations where this pass makes sense?
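//
// E.g. (post-RA, hypothetical registers):
//   mov $r1, 0x40000000     <- immediate 2.0f
//   mad $r0, $r2, $r1, $r0  <- dst and src2 share $r0
// -> mad $r0, $r2, 2.0f, $r0, after which the mov may become dead.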
2909 class NV50PostRaConstantFolding : public Pass
2910 {
2911 private:
2912 virtual bool visit(BasicBlock *);
2913 };
2914
2915 static bool
2916 post_ra_dead(Instruction *i)
2917 {
2918 for (int d = 0; i->defExists(d); ++d)
2919 if (i->getDef(d)->refCount())
2920 return false;
2921 return true;
2922 }
2923
2924 bool
2925 NV50PostRaConstantFolding::visit(BasicBlock *bb)
2926 {
2927 Value *vtmp;
2928 Instruction *def;
2929
2930 for (Instruction *i = bb->getFirst(); i; i = i->next) {
2931 switch (i->op) {
2932 case OP_MAD:
2933 if (i->def(0).getFile() != FILE_GPR ||
2934 i->src(0).getFile() != FILE_GPR ||
2935 i->src(1).getFile() != FILE_GPR ||
2936 i->src(2).getFile() != FILE_GPR ||
2937 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
2938 break;
2939
2940 if (i->getDef(0)->reg.data.id >= 64 ||
2941 i->getSrc(0)->reg.data.id >= 64)
2942 break;
2943
2944 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
2945 break;
2946
2947 if (i->getPredicate())
2948 break;
2949
2950 def = i->getSrc(1)->getInsn();
2951 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
2952 def = def->getSrc(0)->getInsn();
2953 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
2954 vtmp = i->getSrc(1);
2955 if (isFloatType(i->sType)) {
2956 i->setSrc(1, def->getSrc(0));
2957 } else {
2958 ImmediateValue val;
2959 bool ret = def->src(0).getImmediate(val);
2960 assert(ret);
2961 if (i->getSrc(1)->reg.data.id & 1)
2962 val.reg.data.u32 >>= 16;
2963 val.reg.data.u32 &= 0xffff;
2964 i->setSrc(1, new_ImmediateValue(bb->getProgram(), val.reg.data.u32));
2965 }
2966
2967 /* There's no post-RA dead code elimination, so do it here
2968 * XXX: if we add more code-removing post-RA passes, we might
2969 * want to create a post-RA dead-code elim pass */
2970 if (post_ra_dead(vtmp->getInsn())) {
2971 Value *src = vtmp->getInsn()->getSrc(0);
2972 // Careful -- splits will have already been removed from the
2973 // functions. Don't double-delete.
2974 if (vtmp->getInsn()->bb)
2975 delete_Instruction(prog, vtmp->getInsn());
2976 if (src->getInsn() && post_ra_dead(src->getInsn()))
2977 delete_Instruction(prog, src->getInsn());
2978 }
2979
2980 break;
2981 }
2982 break;
2983 default:
2984 break;
2985 }
2986 }
2987
2988 return true;
2989 }
2990
2991 // =============================================================================
2992
2993 // Common subexpression elimination. Stupid O(n^2) implementation.
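// E.g. (hypothetical IR):
//   add f32 %r2, %r0, %r1
//   add f32 %r3, %r0, %r1   <- deleted, uses of %r3 redirected to %r2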
2994 class LocalCSE : public Pass
2995 {
2996 private:
2997 virtual bool visit(BasicBlock *);
2998
2999 inline bool tryReplace(Instruction **, Instruction *);
3000
3001 DLList ops[OP_LAST + 1];
3002 };
3003
3004 class GlobalCSE : public Pass
3005 {
3006 private:
3007 virtual bool visit(BasicBlock *);
3008 };
3009
3010 bool
3011 Instruction::isActionEqual(const Instruction *that) const
3012 {
3013 if (this->op != that->op ||
3014 this->dType != that->dType ||
3015 this->sType != that->sType)
3016 return false;
3017 if (this->cc != that->cc)
3018 return false;
3019
3020 if (this->asTex()) {
3021 if (memcmp(&this->asTex()->tex,
3022 &that->asTex()->tex,
3023 sizeof(this->asTex()->tex)))
3024 return false;
3025 } else
3026 if (this->asCmp()) {
3027 if (this->asCmp()->setCond != that->asCmp()->setCond)
3028 return false;
3029 } else
3030 if (this->asFlow()) {
3031 return false;
3032 } else {
3033 if (this->ipa != that->ipa ||
3034 this->lanes != that->lanes ||
3035 this->perPatch != that->perPatch)
3036 return false;
3037 if (this->postFactor != that->postFactor)
3038 return false;
3039 }
3040
3041 if (this->subOp != that->subOp ||
3042 this->saturate != that->saturate ||
3043 this->rnd != that->rnd ||
3044 this->ftz != that->ftz ||
3045 this->dnz != that->dnz ||
3046 this->cache != that->cache ||
3047 this->mask != that->mask)
3048 return false;
3049
3050 return true;
3051 }
3052
3053 bool
3054 Instruction::isResultEqual(const Instruction *that) const
3055 {
3056 unsigned int d, s;
3057
3058 // NOTE: location of discard only affects tex with liveOnly and quadops
3059 if (!this->defExists(0) && this->op != OP_DISCARD)
3060 return false;
3061
3062 if (!isActionEqual(that))
3063 return false;
3064
3065 if (this->predSrc != that->predSrc)
3066 return false;
3067
3068 for (d = 0; this->defExists(d); ++d) {
3069 if (!that->defExists(d) ||
3070 !this->getDef(d)->equals(that->getDef(d), false))
3071 return false;
3072 }
3073 if (that->defExists(d))
3074 return false;
3075
3076 for (s = 0; this->srcExists(s); ++s) {
3077 if (!that->srcExists(s))
3078 return false;
3079 if (this->src(s).mod != that->src(s).mod)
3080 return false;
3081 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3082 return false;
3083 }
3084 if (that->srcExists(s))
3085 return false;
3086
3087 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3088 switch (src(0).getFile()) {
3089 case FILE_MEMORY_CONST:
3090 case FILE_SHADER_INPUT:
3091 return true;
3092 case FILE_SHADER_OUTPUT:
3093 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3094 default:
3095 return false;
3096 }
3097 }
3098
3099 return true;
3100 }
3101
3102 // Pull expressions that all incoming blocks compute in common into this block.
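// E.g. when every predecessor computes the same value (hypothetical IR):
//   BB:1: add f32 %r1, %a, %b      BB:2: add f32 %r2, %a, %b
//   BB:3: phi %r3, %r1, %r2
// the add from the first predecessor is moved to the top of BB:3 and made to
// define %r3 directly; the phi is deleted and the remaining copies become
// dead, to be cleaned up by DCE.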
3103 bool
3104 GlobalCSE::visit(BasicBlock *bb)
3105 {
3106 Instruction *phi, *next, *ik;
3107 int s;
3108
3109 // TODO: maybe do this with OP_UNION, too
3110
3111 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3112 next = phi->next;
3113 if (phi->getSrc(0)->refCount() > 1)
3114 continue;
3115 ik = phi->getSrc(0)->getInsn();
3116 if (!ik)
3117 continue; // probably a function input
3118 if (ik->defCount(0xff) > 1)
3119 continue; // too painful to check if we can really push this forward
3120 for (s = 1; phi->srcExists(s); ++s) {
3121 if (phi->getSrc(s)->refCount() > 1)
3122 break;
3123 if (!phi->getSrc(s)->getInsn() ||
3124 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3125 break;
3126 }
3127 if (!phi->srcExists(s)) {
3128 Instruction *entry = bb->getEntry();
3129 ik->bb->remove(ik);
3130 if (!entry || entry->op != OP_JOIN)
3131 bb->insertHead(ik);
3132 else
3133 bb->insertAfter(entry, ik);
3134 ik->setDef(0, phi->getDef(0));
3135 delete_Instruction(prog, phi);
3136 }
3137 }
3138
3139 return true;
3140 }
3141
3142 bool
3143 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3144 {
3145 Instruction *old = *ptr;
3146
3147 // TODO: maybe relax this later (causes trouble with OP_UNION)
3148 if (i->isPredicated())
3149 return false;
3150
3151 if (!old->isResultEqual(i))
3152 return false;
3153
3154 for (int d = 0; old->defExists(d); ++d)
3155 old->def(d).replace(i->getDef(d), false);
3156 delete_Instruction(prog, old);
3157 *ptr = NULL;
3158 return true;
3159 }
3160
3161 bool
3162 LocalCSE::visit(BasicBlock *bb)
3163 {
3164 unsigned int replaced;
3165
3166 do {
3167 Instruction *ir, *next;
3168
3169 replaced = 0;
3170
3171 // will need to know the order of instructions
3172 int serial = 0;
3173 for (ir = bb->getFirst(); ir; ir = ir->next)
3174 ir->serial = serial++;
3175
3176 for (ir = bb->getEntry(); ir; ir = next) {
3177 int s;
3178 Value *src = NULL;
3179
3180 next = ir->next;
3181
3182 if (ir->fixed) {
3183 ops[ir->op].insert(ir);
3184 continue;
3185 }
3186
3187 for (s = 0; ir->srcExists(s); ++s)
3188 if (ir->getSrc(s)->asLValue())
3189 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3190 src = ir->getSrc(s);
3191
3192 if (src) {
3193 for (Value::UseIterator it = src->uses.begin();
3194 it != src->uses.end(); ++it) {
3195 Instruction *ik = (*it)->getInsn();
3196 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3197 if (tryReplace(&ir, ik))
3198 break;
3199 }
3200 } else {
3201 DLLIST_FOR_EACH(&ops[ir->op], iter)
3202 {
3203 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3204 if (tryReplace(&ir, ik))
3205 break;
3206 }
3207 }
3208
3209 if (ir)
3210 ops[ir->op].insert(ir);
3211 else
3212 ++replaced;
3213 }
3214 for (unsigned int i = 0; i <= OP_LAST; ++i)
3215 ops[i].clear();
3216
3217 } while (replaced);
3218
3219 return true;
3220 }
3221
3222 // =============================================================================
3223
3224 // Remove computations of unused values.
3225 class DeadCodeElim : public Pass
3226 {
3227 public:
3228 bool buryAll(Program *);
3229
3230 private:
3231 virtual bool visit(BasicBlock *);
3232
3233 void checkSplitLoad(Instruction *ld); // for partially dead loads
3234
3235 unsigned int deadCount;
3236 };
3237
3238 bool
3239 DeadCodeElim::buryAll(Program *prog)
3240 {
3241 do {
3242 deadCount = 0;
3243 if (!this->run(prog, false, false))
3244 return false;
3245 } while (deadCount);
3246
3247 return true;
3248 }
3249
3250 bool
3251 DeadCodeElim::visit(BasicBlock *bb)
3252 {
3253 Instruction *prev;
3254
3255 for (Instruction *i = bb->getExit(); i; i = prev) {
3256 prev = i->prev;
3257 if (i->isDead()) {
3258 ++deadCount;
3259 delete_Instruction(prog, i);
3260 } else
3261 if (i->defExists(1) && (i->op == OP_VFETCH || i->op == OP_LOAD)) {
3262 checkSplitLoad(i);
3263 } else
3264 if (i->defExists(0) && !i->getDef(0)->refCount()) {
3265 if (i->op == OP_ATOM ||
3266 i->op == OP_SUREDP ||
3267 i->op == OP_SUREDB)
3268 i->setDef(0, NULL);
3269 }
3270 }
3271 return true;
3272 }
3273
3274 // Each load can go into up to 4 destinations, any of which might potentially
3275 // be dead (i.e. a hole). These can always be split into 2 loads, independent
3276 // of where the holes are. We find the first contiguous region, put it into
3277 // the first load, and then put the second contiguous region into the second
3278 // load. There can be at most 2 contiguous regions.
3279 //
3280 // Note that there are some restrictions, for example it's not possible to do
3281 // a 64-bit load that's not 64-bit aligned, so such a load has to be split
3282 // up. Also hardware doesn't support 96-bit loads, so those also have to be
3283 // split into a 64-bit and 32-bit load.
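//
// E.g. a 128-bit load whose second component is dead (hypothetical IR)
//   ld u128 { %r0 (dead) %r2 %r3 }, l[0x10]
// is rewritten as
//   ld u32 %r0, l[0x10]
//   ld u64 { %r2 %r3 }, l[0x18]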
3284 void
3285 DeadCodeElim::checkSplitLoad(Instruction *ld1)
3286 {
3287 Instruction *ld2 = NULL; // can get at most 2 loads
3288 Value *def1[4];
3289 Value *def2[4];
3290 int32_t addr1, addr2;
3291 int32_t size1, size2;
3292 int d, n1, n2;
3293 uint32_t mask = 0xffffffff;
3294
3295 for (d = 0; ld1->defExists(d); ++d)
3296 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
3297 mask &= ~(1 << d);
3298 if (mask == 0xffffffff)
3299 return;
3300
3301 addr1 = ld1->getSrc(0)->reg.data.offset;
3302 n1 = n2 = 0;
3303 size1 = size2 = 0;
3304
3305 // Compute address/width for first load
3306 for (d = 0; ld1->defExists(d); ++d) {
3307 if (mask & (1 << d)) {
3308 if (size1 && (addr1 & 0x7))
3309 break;
3310 def1[n1] = ld1->getDef(d);
3311 size1 += def1[n1++]->reg.size;
3312 } else
3313 if (!n1) {
3314 addr1 += ld1->getDef(d)->reg.size;
3315 } else {
3316 break;
3317 }
3318 }
3319
3320 // Scale back the size of the first load until it can be loaded. This
3321 // typically happens for TYPE_B96 loads.
3322 while (n1 &&
3323 !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
3324 typeOfSize(size1))) {
3325 size1 -= def1[--n1]->reg.size;
3326 d--;
3327 }
3328
3329 // Compute address/width for second load
3330 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
3331 if (mask & (1 << d)) {
3332 assert(!size2 || !(addr2 & 0x7));
3333 def2[n2] = ld1->getDef(d);
3334 size2 += def2[n2++]->reg.size;
3335 } else if (!n2) {
3336 // still skipping dead defs before the second region starts
3337 addr2 += ld1->getDef(d)->reg.size;
3338 } else {
3339 break;
3340 }
3341 }
3342
3343 // Make sure that we've processed all the values
3344 for (; ld1->defExists(d); ++d)
3345 assert(!(mask & (1 << d)));
3346
3347 updateLdStOffset(ld1, addr1, func);
3348 ld1->setType(typeOfSize(size1));
3349 for (d = 0; d < 4; ++d)
3350 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
3351
3352 if (!n2)
3353 return;
3354
3355 ld2 = cloneShallow(func, ld1);
3356 updateLdStOffset(ld2, addr2, func);
3357 ld2->setType(typeOfSize(size2));
3358 for (d = 0; d < 4; ++d)
3359 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
3360
3361 ld1->bb->insertAfter(ld1, ld2);
3362 }
3363
3364 // =============================================================================
3365
3366 #define RUN_PASS(l, n, f) \
3367 if (level >= (l)) { \
3368 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
3369 INFO("PEEPHOLE: %s\n", #n); \
3370 n pass; \
3371 if (!pass.f(this)) \
3372 return false; \
3373 }
3374
3375 bool
3376 Program::optimizeSSA(int level)
3377 {
3378 RUN_PASS(1, DeadCodeElim, buryAll);
3379 RUN_PASS(1, CopyPropagation, run);
3380 RUN_PASS(1, MergeSplits, run);
3381 RUN_PASS(2, GlobalCSE, run);
3382 RUN_PASS(1, LocalCSE, run);
3383 RUN_PASS(2, AlgebraicOpt, run);
3384 RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
3385 RUN_PASS(1, ConstantFolding, foldAll);
3386 RUN_PASS(1, LoadPropagation, run);
3387 RUN_PASS(1, IndirectPropagation, run);
3388 RUN_PASS(2, MemoryOpt, run);
3389 RUN_PASS(2, LocalCSE, run);
3390 RUN_PASS(0, DeadCodeElim, buryAll);
3391
3392 return true;
3393 }
3394
3395 bool
3396 Program::optimizePostRA(int level)
3397 {
3398 RUN_PASS(2, FlatteningPass, run);
3399 if (getTarget()->getChipset() < 0xc0)
3400 RUN_PASS(2, NV50PostRaConstantFolding, run);
3401
3402 return true;
3403 }
3404
3405 }