nv50/ir: convert an ATOM.EXCH without a destination into a store
src/gallium/drivers/nouveau/codegen/nv50_ir_peephole.cpp
/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

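// An instruction is dead if it has no side effects (stores, exports, atomics
// and the like are always kept), none of its defs are still referenced or
// pinned to a register, and it is neither fixed in place nor control flow.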
bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (!si || si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

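// Try to swap an operand that could be folded in from memory or an immediate
// into src1: the hardware encodings generally accept such an operand in only
// one source slot, and the exact rules are queried through the target's
// insnCanLoad() below.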
void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative)
      if (insn->op != OP_SET && insn->op != OP_SLCT && insn->op != OP_SUB)
         return;
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alphatesting, we can't reverse its
   // arguments as that will confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way,
   // optimistically, it will eventually be able to remove the instruction.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   void opnd(Instruction *, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2))
         expr(i, src0, src1, src2);
      else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1))
         expr(i, src0, src1);
      else
      if (i->srcExists(0) && i->src(0).getImmediate(src0))
         opnd(i, src0, 0);
      else
      if (i->srcExists(1) && i->src(1).getImmediate(src1))
         opnd(i, src1, 1);
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn) // no defining instruction, nothing to trace
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
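      // EXTBF's src1 packs the field position: offset in bits [7:0], width in
      // bits [15:8]. The shift pair below sign- or zero-extends the extracted
      // field according to dType, e.g. EXTBF(0xdeadbeef, 0x0808) selects bits
      // 8..15 and yields 0xbe.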
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. However it might not be anyways, as there may be other
      // restrictions, so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_INSBF: {
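      // INSBF inserts the low `width` bits of src0 into src2 at `offset`;
      // src1 packs offset/width just like EXTBF. E.g. INSBF(0xab, 0x0808, 0)
      // produces 0xab00.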
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = CLAMP(imm.reg.data.f32, 0.0f, 1.0f); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

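// Fold a single immediate source (imm0, in slot s) into the instruction,
// typically rewriting it into a cheaper or more easily foldable form.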
void
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const Target *target = prog->getTarget();
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size; // in bytes
      uint32_t mask = (1ULL << (size * 8)) - 1;
      assert(size <= 4);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= size * 8;
      }
      delete_Instruction(prog, i);
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
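            // hi32(x * 2^k) == x >> (32 - k) for unsigned x, so a mul-high
            // by a power of two is just a plain right shift.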
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->sType) && !imm0.isNegative() && imm0.isPow2()) {
         i->op = OP_SHL;
         imm0.applyLog2();
         i->setSrc(0, i->getSrc(t));
         i->src(0).mod = i->src(t).mod;
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
         i->src(1).mod = 0;
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->op = OP_ADD;
      } else
      if (s == 1 && !imm0.isNegative() && imm0.isPow2() &&
          target->isOpSupported(OP_SHLADD, i->dType)) {
         i->op = OP_SHLADD;
         imm0.applyLog2();
         i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      /* fallthrough */
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;
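
         // Overflow-avoiding variant of division by invariant multiplication
         // (cf. Granlund & Montgomery / "Hacker's Delight"): with
         // m = floor(2^32 * (2^l - d) / d) + 1, the quotient is
         //   q = (hi + ((x - hi) >> r)) >> s, where hi = hi32(x * m).
         // E.g. d == 7: l = 3, m = 0x24924925, r = 1, s = 2; for x == 100,
         // hi = 14 and q = (14 + (86 >> 1)) >> 2 = 14 == 100 / 7.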
         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);
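
         // Signed counterpart: m = 2^(32+l-1) / |d| + 1 - 2^32 is the
         // (negative) magic constant, so hi32(x * m) + x is formed with the
         // mul-high MAD below; the SET/SUB pair then adds 1 back when x is
         // negative, and the quotient is negated again for d < 0.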
         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
      }
      break;

   case OP_MOD:
      if (i->sType == TYPE_U32 && imm0.isPow2()) {
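         // x % 2^k == x & (2^k - 1) for unsigned x.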
         bld.setPosition(i, false);
         i->op = OP_AND;
         i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src && src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return;
         if (imm0.reg.data.f32 != 1.0f)
            return;
         if (cmp->dType != TYPE_U32)
            return;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (src && prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
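         // e.g. AND(SHR(x, 8), 0xff) -> EXTBF(x, 0x0808), i.e. extract an
         // 8-bit field at offset 8.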
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      } else if (src && src->op == OP_SHL &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two(~imm0.reg.data.u32 + 1) &&
                 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
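         // (x >> c) << c only clears the low c bits; do that with a mask.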
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return;
         if (si->src(!adds).mod != Modifier(0))
            return;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b)
         // propagate the add bit into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f64, 0.0f, 1.0f) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               CLAMP(imm0.reg.data.f32, 0.0f, 1.0f) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return;
   }
   if (newi->op != op)
      foldCount++;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);

   BuildUtil bld;
};

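// ABS(SUB(a, b)) and ABS(ADD(a, NEG(b))) can be turned into SAD(a, b, 0)
// when the target supports SAD for the type involved.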
void
AlgebraicOpt::handleABS(Instruction *abs)
{
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
   abs->moveSources(1, 2); // move sources >=1 up by 2
   abs->op = OP_SAD;
   abs->setType(sub->dType);
   abs->setSrc(0, src0);
   abs->setSrc(1, src1);
   bld.setPosition(abs, false);
   abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
}

bool
AlgebraicOpt::handleADD(Instruction *add)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return false;

   bool changed = false;
   if (!changed && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_MAD);
   if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
      changed = tryADDToMADOrSAD(add, OP_SAD);
   return changed;
}

// ADD(SAD(a,b,0), c) -> SAD(a,b,c)
// ADD(MUL(a,b), c) -> MAD(a,b,c)
bool
AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
{
   Value *src0 = add->getSrc(0);
   Value *src1 = add->getSrc(1);
   Value *src;
   int s;
   const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
   const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
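   // For MAD only a NEG modifier can be folded into the new op; SAD tolerates
   // no modifiers at all, hence everything else is "bad" here.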
   Modifier mod[4];

   if (src0->refCount() == 1 &&
       src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
      s = 0;
   else
   if (src1->refCount() == 1 &&
       src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
      s = 1;
   else
      return false;

   src = add->getSrc(s);

   if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
      return false;

   if (src->getInsn()->saturate || src->getInsn()->postFactor ||
       src->getInsn()->dnz)
      return false;

   if (toOp == OP_SAD) {
      ImmediateValue imm;
      if (!src->getInsn()->src(2).getImmediate(imm))
         return false;
      if (!imm.isInteger(0))
         return false;
   }

   if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
       isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
      return false;

   mod[0] = add->src(0).mod;
   mod[1] = add->src(1).mod;
   mod[2] = src->getUniqueInsn()->src(0).mod;
   mod[3] = src->getUniqueInsn()->src(1).mod;

   if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
      return false;

   add->op = toOp;
   add->subOp = src->getInsn()->subOp; // potentially mul-high
   add->dnz = src->getInsn()->dnz;
   add->dType = src->getInsn()->dType; // sign matters for imad hi
   add->sType = src->getInsn()->sType;

   add->setSrc(2, add->src(s ? 0 : 1));

   add->setSrc(0, src->getInsn()->getSrc(0));
   add->src(0).mod = mod[2] ^ mod[s];
   add->setSrc(1, src->getInsn()->getSrc(1));
   add->src(1).mod = mod[3];

   return true;
}

void
AlgebraicOpt::handleMINMAX(Instruction *minmax)
{
   Value *src0 = minmax->getSrc(0);
   Value *src1 = minmax->getSrc(1);

   if (src0 != src1 || src0->reg.file != FILE_GPR)
      return;
   if (minmax->src(0).mod == minmax->src(1).mod) {
      if (minmax->def(0).mayReplace(minmax->src(0))) {
         minmax->def(0).replace(minmax->src(0), false);
         minmax->bb->remove(minmax);
      } else {
         minmax->op = OP_CVT;
         minmax->setSrc(1, NULL);
      }
   } else {
      // TODO:
      // min(x, -x) = -abs(x)
      // min(x, -abs(x)) = -abs(x)
      // min(x, abs(x)) = x
      // max(x, -abs(x)) = x
      // max(x, abs(x)) = abs(x)
      // max(x, -x) = abs(x)
   }
}

void
AlgebraicOpt::handleRCP(Instruction *rcp)
{
   Instruction *si = rcp->getSrc(0)->getUniqueInsn();

   if (si && si->op == OP_RCP) {
      Modifier mod = rcp->src(0).mod * si->src(0).mod;
      rcp->op = mod.getOp();
      rcp->setSrc(0, si->getSrc(0));
   }
}

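// SLCT with an immediate condition is fully decided at compile time: keep
// whichever source the condition selects. If both sources are identical the
// condition does not matter either way.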
void
AlgebraicOpt::handleSLCT(Instruction *slct)
{
   if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
      if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
         slct->setSrc(0, slct->getSrc(1));
   } else
   if (slct->getSrc(0) != slct->getSrc(1)) {
      return;
   }
   slct->op = OP_MOV;
   slct->setSrc(1, NULL);
   slct->setSrc(2, NULL);
}

void
AlgebraicOpt::handleLOGOP(Instruction *logop)
{
   Value *src0 = logop->getSrc(0);
   Value *src1 = logop->getSrc(1);

   if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
      return;

   if (src0 == src1) {
      if ((logop->op == OP_AND || logop->op == OP_OR) &&
          logop->def(0).mayReplace(logop->src(0))) {
         logop->def(0).replace(logop->src(0), false);
         delete_Instruction(prog, logop);
      }
   } else {
      // try AND(SET, SET) -> SET_AND(SET)
      Instruction *set0 = src0->getInsn();
      Instruction *set1 = src1->getInsn();

      if (!set0 || set0->fixed || !set1 || set1->fixed)
         return;
      if (set1->op != OP_SET) {
         Instruction *xchg = set0;
         set0 = set1;
         set1 = xchg;
         if (set1->op != OP_SET)
            return;
      }
      operation redOp = (logop->op == OP_AND ? OP_SET_AND :
                         logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
      if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
         return;
      if (set0->op != OP_SET &&
          set0->op != OP_SET_AND &&
          set0->op != OP_SET_OR &&
          set0->op != OP_SET_XOR)
         return;
      if (set0->getDef(0)->refCount() > 1 &&
          set1->getDef(0)->refCount() > 1)
         return;
      if (set0->getPredicate() || set1->getPredicate())
         return;
      // check that they don't source each other
      for (int s = 0; s < 2; ++s)
         if (set0->getSrc(s) == set1->getDef(0) ||
             set1->getSrc(s) == set0->getDef(0))
            return;

      set0 = cloneForward(func, set0);
      set1 = cloneShallow(func, set1);
      logop->bb->insertAfter(logop, set1);
      logop->bb->insertAfter(logop, set0);

      set0->dType = TYPE_U8;
      set0->getDef(0)->reg.file = FILE_PREDICATE;
      set0->getDef(0)->reg.size = 1;
      set1->setSrc(2, set0->getDef(0));
      set1->op = redOp;
      set1->setDef(0, logop->getDef(0));
      delete_Instruction(prog, logop);
   }
}

// F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
// nv50:
// F2I(NEG(I2F(ABS(SET))))
void
AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
{
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (cvt->sType != TYPE_F32 ||
       cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
      return;
   if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
      return;
   if (insn->src(0).mod != Modifier(0))
      return;
   insn = insn->getSrc(0)->getInsn();

   // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
   if (insn && insn->op == OP_CVT &&
       insn->dType == TYPE_F32 &&
       insn->sType == TYPE_S32) {
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
          insn->src(0).mod)
         return;
      insn = insn->getSrc(0)->getInsn();
      if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
         return;
   } else
   if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
      return;
   }

   Instruction *bset = cloneShallow(func, insn);
   bset->dType = TYPE_U32;
   bset->setDef(0, cvt->getDef(0));
   cvt->bb->insertAfter(cvt, bset);
   delete_Instruction(prog, cvt);
}

// F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
// does a type conversion, this becomes trickier as there might be range
// changes/etc. We could handle those in theory as long as the range was being
// reduced or kept the same.
void
AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
{
   Instruction *insn = cvt->getSrc(0)->getInsn();
   if (!insn)
      return;
   RoundMode rnd = insn->rnd;

   if (insn->saturate ||
       insn->subOp ||
       insn->dType != insn->sType ||
       insn->dType != cvt->sType)
      return;

   switch (insn->op) {
   case OP_CEIL:
      rnd = ROUND_PI;
      break;
   case OP_FLOOR:
      rnd = ROUND_MI;
      break;
   case OP_TRUNC:
      rnd = ROUND_ZI;
      break;
   case OP_CVT:
      break;
   default:
      return;
   }

   if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
      rnd = (RoundMode)(rnd & 3);

   cvt->rnd = rnd;
   cvt->setSrc(0, insn->getSrc(0));
   cvt->src(0).mod *= insn->src(0).mod;
   cvt->sType = insn->sType;
}
1948
1949 // Some shaders extract packed bytes out of words and convert them to
1950 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
1951 // nv50 for word sizes.
1952 //
1953 // CVT(EXTBF(x, byte/word))
1954 // CVT(AND(bytemask, x))
1955 // CVT(AND(bytemask, SHR(x, 8/16/24)))
1956 // CVT(SHR(x, 16/24))
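// Worked example for the AND+SHR form (hypothetical registers):
//   shr u32 $r1 $r0 0x8
//   and u32 $r2 $r1 0xff
//   cvt f32 $r3 u32 $r2
// folds into a single conversion of byte 1 of $r0:
//   cvt f32 $r3 u8 $r0          // subOp = offset >> 3 = 1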
1957 void
1958 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
1959 {
1960 Instruction *insn = cvt->getSrc(0)->getInsn();
1961 ImmediateValue imm;
1962 Value *arg = NULL;
1963 unsigned width, offset;
1964 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
1965 return;
1966 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
1967 width = (imm.reg.data.u32 >> 8) & 0xff;
1968 offset = imm.reg.data.u32 & 0xff;
1969 arg = insn->getSrc(0);
1970
1971 if (width != 8 && width != 16)
1972 return;
1973 if (width == 8 && offset & 0x7)
1974 return;
1975 if (width == 16 && offset & 0xf)
1976 return;
1977 } else if (insn->op == OP_AND) {
1978 int s;
1979 if (insn->src(0).getImmediate(imm))
1980 s = 0;
1981 else if (insn->src(1).getImmediate(imm))
1982 s = 1;
1983 else
1984 return;
1985
1986 if (imm.reg.data.u32 == 0xff)
1987 width = 8;
1988 else if (imm.reg.data.u32 == 0xffff)
1989 width = 16;
1990 else
1991 return;
1992
1993 arg = insn->getSrc(!s);
1994 Instruction *shift = arg->getInsn();
1995 offset = 0;
1996 if (shift && shift->op == OP_SHR &&
1997 shift->sType == cvt->sType &&
1998 shift->src(1).getImmediate(imm) &&
1999 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2000 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2001 arg = shift->getSrc(0);
2002 offset = imm.reg.data.u32;
2003 }
2004 // We just AND'd the high bits away, which means this is effectively an
2005 // unsigned value.
2006 cvt->sType = TYPE_U32;
2007 } else if (insn->op == OP_SHR &&
2008 insn->sType == cvt->sType &&
2009 insn->src(1).getImmediate(imm)) {
2010 arg = insn->getSrc(0);
2011 if (imm.reg.data.u32 == 24) {
2012 width = 8;
2013 offset = 24;
2014 } else if (imm.reg.data.u32 == 16) {
2015 width = 16;
2016 offset = 16;
2017 } else {
2018 return;
2019 }
2020 }
2021
2022 if (!arg)
2023 return;
2024
2025 // Irrespective of what came earlier, we can undo a shift on the argument
2026 // by adjusting the offset.
2027 Instruction *shift = arg->getInsn();
2028 if (shift && shift->op == OP_SHL &&
2029 shift->src(1).getImmediate(imm) &&
2030 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2031 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2032 imm.reg.data.u32 <= offset) {
2033 arg = shift->getSrc(0);
2034 offset -= imm.reg.data.u32;
2035 }
2036
2037 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2038 // annoying to detect them.
2039
2040 if (width == 8) {
2041 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2042 } else {
2043 assert(width == 16);
2044 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2045 }
2046 cvt->setSrc(0, arg);
2047 cvt->subOp = offset >> 3;
2048 }
2049
2050 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits in signed 6 bits)
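// e.g. (sketch): SUCLAMP dst, (ADD b 0x3), k, 0 becomes SUCLAMP dst, b, k, 3;
// the fold is only performed when the summed immediate stays within the
// signed 6-bit range [-32, 31] and the other addend is an unmodified GPR.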
2051 void
2052 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2053 {
2054 ImmediateValue imm;
2055 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2056 int s;
2057 Instruction *add;
2058
2059 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2060
2061 // look for ADD (TODO: only count references by non-SUCLAMP)
2062 if (insn->getSrc(0)->refCount() > 1)
2063 return;
2064 add = insn->getSrc(0)->getInsn();
2065 if (!add || add->op != OP_ADD ||
2066 (add->dType != TYPE_U32 &&
2067 add->dType != TYPE_S32))
2068 return;
2069
2070 // look for immediate
2071 for (s = 0; s < 2; ++s)
2072 if (add->src(s).getImmediate(imm))
2073 break;
2074 if (s >= 2)
2075 return;
2076 s = s ? 0 : 1;
2077 // determine if immediate fits
2078 val += imm.reg.data.s32;
2079 if (val > 31 || val < -32)
2080 return;
2081 // determine if other addend fits
2082 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2083 return;
2084
2085 bld.setPosition(insn, false); // make sure bld is init'ed
2086 // replace sources
2087 insn->setSrc(2, bld.mkImm(val));
2088 insn->setSrc(0, add->getSrc(s));
2089 }
2090
2091 // NEG(AND(SET, 1)) -> SET
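// Illustrative (integer SET yields 0 or -1, AND 1 maps that to 0/1, and the
// NEG maps it back to 0/-1, i.e. the original SET result):
//   set u32 $r1 lt $r2 $r3
//   and u32 $r4 $r1 0x1
//   neg s32 $r5 $r4
// so all uses of $r5 can read $r1 directly.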
2092 void
2093 AlgebraicOpt::handleNEG(Instruction *i) {
2094 Instruction *src = i->getSrc(0)->getInsn();
2095 ImmediateValue imm;
2096 int b;
2097
2098 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2099 return;
2100
2101 if (src->src(0).getImmediate(imm))
2102 b = 1;
2103 else if (src->src(1).getImmediate(imm))
2104 b = 0;
2105 else
2106 return;
2107
2108 if (!imm.isInteger(1))
2109 return;
2110
2111 Instruction *set = src->getSrc(b)->getInsn();
2112 if (set && (set->op == OP_SET || set->op == OP_SET_AND ||
2113 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2114 !isFloatType(set->dType)) {
2115 i->def(0).replace(set->getDef(0), false);
2116 }
2117 }
2118
2119 bool
2120 AlgebraicOpt::visit(BasicBlock *bb)
2121 {
2122 Instruction *next;
2123 for (Instruction *i = bb->getEntry(); i; i = next) {
2124 next = i->next;
2125 switch (i->op) {
2126 case OP_ABS:
2127 handleABS(i);
2128 break;
2129 case OP_ADD:
2130 handleADD(i);
2131 break;
2132 case OP_RCP:
2133 handleRCP(i);
2134 break;
2135 case OP_MIN:
2136 case OP_MAX:
2137 handleMINMAX(i);
2138 break;
2139 case OP_SLCT:
2140 handleSLCT(i);
2141 break;
2142 case OP_AND:
2143 case OP_OR:
2144 case OP_XOR:
2145 handleLOGOP(i);
2146 break;
2147 case OP_CVT:
2148 handleCVT_NEG(i);
2149 handleCVT_CVT(i);
2150 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2151 handleCVT_EXTBF(i);
2152 break;
2153 case OP_SUCLAMP:
2154 handleSUCLAMP(i);
2155 break;
2156 case OP_NEG:
2157 handleNEG(i);
2158 break;
2159 default:
2160 break;
2161 }
2162 }
2163
2164 return true;
2165 }
2166
2167 // =============================================================================
2168
2169 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2170 class LateAlgebraicOpt : public Pass
2171 {
2172 private:
2173 virtual bool visit(Instruction *);
2174
2175 void handleADD(Instruction *);
2176 bool tryADDToSHLADD(Instruction *);
2177 };
2178
2179 void
2180 LateAlgebraicOpt::handleADD(Instruction *add)
2181 {
2182 Value *src0 = add->getSrc(0);
2183 Value *src1 = add->getSrc(1);
2184
2185 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2186 return;
2187
2188 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2189 tryADDToSHLADD(add);
2190 }
2191
2192 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
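// A minimal sketch (hypothetical registers, immediate shift count):
//   shl u32 $r1 $r0 0x4
//   add u32 $r3 $r1 $r2
// becomes
//   shladd u32 $r3 $r0 0x4 $r2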
2193 bool
2194 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2195 {
2196 Value *src0 = add->getSrc(0);
2197 Value *src1 = add->getSrc(1);
2198 ImmediateValue imm;
2199 Instruction *shl;
2200 Value *src;
2201 int s;
2202
2203 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2204 || isFloatType(add->dType))
2205 return false;
2206
2207 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2208 s = 0;
2209 else
2210 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2211 s = 1;
2212 else
2213 return false;
2214
2215 src = add->getSrc(s);
2216 shl = src->getUniqueInsn();
2217
2218 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2219 return false;
2220
2221 if (!shl->src(1).getImmediate(imm))
2222 return false;
2223
2224 add->op = OP_SHLADD;
2225 add->setSrc(2, add->src(!s));
2226 // SHL can't have any modifiers, but the ADD source may have had
2227 // one. Preserve it.
2228 add->setSrc(0, shl->getSrc(0));
2229 if (s == 1)
2230 add->src(0).mod = add->src(1).mod;
2231 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2232 add->src(1).mod = Modifier(0);
2233
2234 return true;
2235 }
2236
2237 bool
2238 LateAlgebraicOpt::visit(Instruction *i)
2239 {
2240 switch (i->op) {
2241 case OP_ADD:
2242 handleADD(i);
2243 break;
2244 default:
2245 break;
2246 }
2247
2248 return true;
2249 }
2250
2251 // =============================================================================
2252
2253 // Split 64-bit MUL and MAD
2254 class Split64BitOpPreRA : public Pass
2255 {
2256 private:
2257 virtual bool visit(BasicBlock *);
2258 void split64MulMad(Function *, Instruction *, DataType);
2259
2260 BuildUtil bld;
2261 };
2262
2263 bool
2264 Split64BitOpPreRA::visit(BasicBlock *bb)
2265 {
2266 Instruction *i, *next;
2267 Modifier mod;
2268
2269 for (i = bb->getEntry(); i; i = next) {
2270 next = i->next;
2271
2272 DataType hTy;
2273 switch (i->dType) {
2274 case TYPE_U64: hTy = TYPE_U32; break;
2275 case TYPE_S64: hTy = TYPE_S32; break;
2276 default:
2277 continue;
2278 }
2279
2280 if (i->op == OP_MAD || i->op == OP_MUL)
2281 split64MulMad(func, i, hTy);
2282 }
2283
2284 return true;
2285 }
2286
2287 void
2288 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2289 {
2290 assert(i->op == OP_MAD || i->op == OP_MUL);
2291 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2292 assert(typeSizeof(hTy) == 4);
2293
2294 bld.setPosition(i, true);
2295
2296 Value *zero = bld.mkImm(0u);
2297 Value *carry = bld.getSSA(1, FILE_FLAGS);
2298
2299 // We want to compute `d = a * b (+ c)?`, where a, b, c and d are 64-bit
2300 // values (a, b and c might be 32-bit values), using 32-bit operations. This
2301 // gives the following operations:
2302 // * `d.low = low(a.low * b.low) (+ c.low)?`
2303 // * `d.high = low(a.high * b.low) + low(a.low * b.high)
2304 // + high(a.low * b.low) (+ c.high)?`
2305 //
2306 // To compute the high bits, we can split in the following operations:
2307 // * `tmp1 = low(a.high * b.low) (+ c.high)?`
2308 // * `tmp2 = low(a.low * b.high) + tmp1`
2309 // * `d.high = high(a.low * b.low) + tmp2`
2310 //
2311 // mkSplit puts the lower bits at index 0 and the higher bits at index 1
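// Sanity check with small (hypothetical) values: for a = 2^32 + 3, b = 5,
// a.low = 3, a.high = 1, b.low = 5, b.high = 0, so
// d.low = low(3 * 5) = 15 and
// d.high = low(1 * 5) + low(3 * 0) + high(3 * 5) = 5,
// i.e. d = 5 * 2^32 + 15 = a * b, as expected.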
2312
2313 Value *op1[2];
2314 if (i->getSrc(0)->reg.size == 8)
2315 bld.mkSplit(op1, 4, i->getSrc(0));
2316 else {
2317 op1[0] = i->getSrc(0);
2318 op1[1] = zero;
2319 }
2320 Value *op2[2];
2321 if (i->getSrc(1)->reg.size == 8)
2322 bld.mkSplit(op2, 4, i->getSrc(1));
2323 else {
2324 op2[0] = i->getSrc(1);
2325 op2[1] = zero;
2326 }
2327
2328 Value *op3[2] = { NULL, NULL };
2329 if (i->op == OP_MAD) {
2330 if (i->getSrc(2)->reg.size == 8)
2331 bld.mkSplit(op3, 4, i->getSrc(2));
2332 else {
2333 op3[0] = i->getSrc(2);
2334 op3[1] = zero;
2335 }
2336 }
2337
2338 Value *tmpRes1Hi = bld.getSSA();
2339 if (i->op == OP_MAD)
2340 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2341 else
2342 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2343
2344 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2345
2346 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2347
2348 // If it was a MAD, add the carry from the low bits
2349 // It is not needed if it was a MUL, since we added high(a.low * b.low) to
2350 // d.high
2351 if (i->op == OP_MAD)
2352 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2353 else
2354 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2355
2356 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2357 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2358 if (i->op == OP_MAD)
2359 hiPart3->setFlagsSrc(3, carry);
2360
2361 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2362
2363 delete_Instruction(fn->getProgram(), i);
2364 }
2365
2366 // =============================================================================
2367
2368 static inline void
2369 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2370 {
2371 if (offset != ldst->getSrc(0)->reg.data.offset) {
2372 if (ldst->getSrc(0)->refCount() > 1)
2373 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2374 ldst->getSrc(0)->reg.data.offset = offset;
2375 }
2376 }
2377
2378 // Combine loads and stores, forward stores to loads where possible.
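// e.g. (sketch, notation approximate): two adjacent 32-bit loads
//   ld u32 $r0 l[0x10]
//   ld u32 $r1 l[0x14]
// can be combined into one 64-bit load from l[0x10], and a load that follows
// a store to the same offset can reuse the stored value instead of reloading.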
2379 class MemoryOpt : public Pass
2380 {
2381 private:
2382 class Record
2383 {
2384 public:
2385 Record *next;
2386 Instruction *insn;
2387 const Value *rel[2];
2388 const Value *base;
2389 int32_t offset;
2390 int8_t fileIndex;
2391 uint8_t size;
2392 bool locked;
2393 Record *prev;
2394
2395 bool overlaps(const Instruction *ldst) const;
2396
2397 inline void link(Record **);
2398 inline void unlink(Record **);
2399 inline void set(const Instruction *ldst);
2400 };
2401
2402 public:
2403 MemoryOpt();
2404
2405 Record *loads[DATA_FILE_COUNT];
2406 Record *stores[DATA_FILE_COUNT];
2407
2408 MemoryPool recordPool;
2409
2410 private:
2411 virtual bool visit(BasicBlock *);
2412 bool runOpt(BasicBlock *);
2413
2414 Record **getList(const Instruction *);
2415
2416 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2417
2418 // merge @insn into load/store instruction from @rec
2419 bool combineLd(Record *rec, Instruction *ld);
2420 bool combineSt(Record *rec, Instruction *st);
2421
2422 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2423 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2424 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2425
2426 void addRecord(Instruction *ldst);
2427 void purgeRecords(Instruction *const st, DataFile);
2428 void lockStores(Instruction *const ld);
2429 void reset();
2430
2431 private:
2432 Record *prevRecord;
2433 };
2434
2435 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2436 {
2437 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2438 loads[i] = NULL;
2439 stores[i] = NULL;
2440 }
2441 prevRecord = NULL;
2442 }
2443
2444 void
2445 MemoryOpt::reset()
2446 {
2447 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2448 Record *it, *next;
2449 for (it = loads[i]; it; it = next) {
2450 next = it->next;
2451 recordPool.release(it);
2452 }
2453 loads[i] = NULL;
2454 for (it = stores[i]; it; it = next) {
2455 next = it->next;
2456 recordPool.release(it);
2457 }
2458 stores[i] = NULL;
2459 }
2460 }
2461
2462 bool
2463 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2464 {
2465 int32_t offRc = rec->offset;
2466 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2467 int sizeRc = rec->size;
2468 int sizeLd = typeSizeof(ld->dType);
2469 int size = sizeRc + sizeLd;
2470 int d, j;
2471
2472 if (!prog->getTarget()->
2473 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2474 return false;
2475 // no unaligned loads
2476 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2477 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2478 return false;
2479 // for compute, indirect loads are not guaranteed to be aligned
2480 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2481 return false;
2482
2483 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2484
2485 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2486
2487 if (offLd < offRc) {
2488 int sz;
2489 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2490 // d: nr of definitions in ld
2491 // j: nr of definitions in rec->insn, move:
2492 for (d = d + j - 1; j > 0; --j, --d)
2493 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2494
2495 if (rec->insn->getSrc(0)->refCount() > 1)
2496 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2497 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2498
2499 d = 0;
2500 } else {
2501 d = j;
2502 }
2503 // move definitions of @ld to @rec->insn
2504 for (j = 0; sizeLd; ++j, ++d) {
2505 sizeLd -= ld->getDef(j)->reg.size;
2506 rec->insn->setDef(d, ld->getDef(j));
2507 }
2508
2509 rec->size = size;
2510 rec->insn->getSrc(0)->reg.size = size;
2511 rec->insn->setType(typeOfSize(size));
2512
2513 delete_Instruction(prog, ld);
2514
2515 return true;
2516 }
2517
2518 bool
2519 MemoryOpt::combineSt(Record *rec, Instruction *st)
2520 {
2521 int32_t offRc = rec->offset;
2522 int32_t offSt = st->getSrc(0)->reg.data.offset;
2523 int sizeRc = rec->size;
2524 int sizeSt = typeSizeof(st->dType);
2525 int s = sizeSt / 4;
2526 int size = sizeRc + sizeSt;
2527 int j, k;
2528 Value *src[4]; // no modifiers in ValueRef allowed for st
2529 Value *extra[3];
2530
2531 if (!prog->getTarget()->
2532 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2533 return false;
2534 // no unaligned stores
2535 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2536 return false;
2537 // for compute, indirect stores are not guaranteed to be aligned
2538 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2539 return false;
2540
2541 st->takeExtraSources(0, extra); // save predicate and indirect address
2542
2543 if (offRc < offSt) {
2544 // save values from @st
2545 for (s = 0; sizeSt; ++s) {
2546 sizeSt -= st->getSrc(s + 1)->reg.size;
2547 src[s] = st->getSrc(s + 1);
2548 }
2549 // set record's values as low sources of @st
2550 for (j = 1; sizeRc; ++j) {
2551 sizeRc -= rec->insn->getSrc(j)->reg.size;
2552 st->setSrc(j, rec->insn->getSrc(j));
2553 }
2554 // set saved values as high sources of @st
2555 for (k = j, j = 0; j < s; ++j)
2556 st->setSrc(k++, src[j]);
2557
2558 updateLdStOffset(st, offRc, func);
2559 } else {
2560 for (j = 1; sizeSt; ++j)
2561 sizeSt -= st->getSrc(j)->reg.size;
2562 for (s = 1; sizeRc; ++j, ++s) {
2563 sizeRc -= rec->insn->getSrc(s)->reg.size;
2564 st->setSrc(j, rec->insn->getSrc(s));
2565 }
2566 rec->offset = offSt;
2567 }
2568 st->putExtraSources(0, extra); // restore pointer and predicate
2569
2570 delete_Instruction(prog, rec->insn);
2571 rec->insn = st;
2572 rec->size = size;
2573 rec->insn->getSrc(0)->reg.size = size;
2574 rec->insn->setType(typeOfSize(size));
2575 return true;
2576 }
2577
2578 void
2579 MemoryOpt::Record::set(const Instruction *ldst)
2580 {
2581 const Symbol *mem = ldst->getSrc(0)->asSym();
2582 fileIndex = mem->reg.fileIndex;
2583 rel[0] = ldst->getIndirect(0, 0);
2584 rel[1] = ldst->getIndirect(0, 1);
2585 offset = mem->reg.data.offset;
2586 base = mem->getBase();
2587 size = typeSizeof(ldst->sType);
2588 }
2589
2590 void
2591 MemoryOpt::Record::link(Record **list)
2592 {
2593 next = *list;
2594 if (next)
2595 next->prev = this;
2596 prev = NULL;
2597 *list = this;
2598 }
2599
2600 void
2601 MemoryOpt::Record::unlink(Record **list)
2602 {
2603 if (next)
2604 next->prev = prev;
2605 if (prev)
2606 prev->next = next;
2607 else
2608 *list = next;
2609 }
2610
2611 MemoryOpt::Record **
2612 MemoryOpt::getList(const Instruction *insn)
2613 {
2614 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2615 return &loads[insn->src(0).getFile()];
2616 return &stores[insn->src(0).getFile()];
2617 }
2618
2619 void
2620 MemoryOpt::addRecord(Instruction *i)
2621 {
2622 Record **list = getList(i);
2623 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2624
2625 it->link(list);
2626 it->set(i);
2627 it->insn = i;
2628 it->locked = false;
2629 }
2630
2631 MemoryOpt::Record *
2632 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2633 {
2634 const Symbol *sym = insn->getSrc(0)->asSym();
2635 const int size = typeSizeof(insn->sType);
2636 Record *rec = NULL;
2637 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2638
2639 for (; it; it = it->next) {
2640 if (it->locked && insn->op != OP_LOAD)
2641 continue;
2642 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2643 it->rel[0] != insn->getIndirect(0, 0) ||
2644 it->fileIndex != sym->reg.fileIndex ||
2645 it->rel[1] != insn->getIndirect(0, 1))
2646 continue;
2647
2648 if (it->offset < sym->reg.data.offset) {
2649 if (it->offset + it->size >= sym->reg.data.offset) {
2650 isAdj = (it->offset + it->size == sym->reg.data.offset);
2651 if (!isAdj)
2652 return it;
2653 if (!(it->offset & 0x7))
2654 rec = it;
2655 }
2656 } else {
2657 isAdj = it->offset != sym->reg.data.offset;
2658 if (size <= it->size && !isAdj)
2659 return it;
2660 else
2661 if (!(sym->reg.data.offset & 0x7))
2662 if (it->offset - size <= sym->reg.data.offset)
2663 rec = it;
2664 }
2665 }
2666 return rec;
2667 }
2668
2669 bool
2670 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2671 {
2672 Instruction *st = rec->insn;
2673 int32_t offSt = rec->offset;
2674 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2675 int d, s;
2676
2677 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2678 offSt += st->getSrc(s)->reg.size;
2679 if (offSt != offLd)
2680 return false;
2681
2682 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2683 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
2684 return false;
2685 if (st->getSrc(s)->reg.file != FILE_GPR)
2686 return false;
2687 ld->def(d).replace(st->src(s), false);
2688 }
2689 ld->bb->remove(ld);
2690 return true;
2691 }
2692
2693 bool
2694 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
2695 {
2696 Instruction *ldR = rec->insn;
2697 int32_t offR = rec->offset;
2698 int32_t offE = ldE->getSrc(0)->reg.data.offset;
2699 int dR, dE;
2700
2701 assert(offR <= offE);
2702 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
2703 offR += ldR->getDef(dR)->reg.size;
2704 if (offR != offE)
2705 return false;
2706
2707 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
2708 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
2709 return false;
2710 ldE->def(dE).replace(ldR->getDef(dR), false);
2711 }
2712
2713 delete_Instruction(prog, ldE);
2714 return true;
2715 }
2716
2717 bool
2718 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
2719 {
2720 const Instruction *const ri = rec->insn;
2721 Value *extra[3];
2722
2723 int32_t offS = st->getSrc(0)->reg.data.offset;
2724 int32_t offR = rec->offset;
2725 int32_t endS = offS + typeSizeof(st->dType);
2726 int32_t endR = offR + typeSizeof(ri->dType);
2727
2728 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
2729
2730 st->takeExtraSources(0, extra);
2731
2732 if (offR < offS) {
2733 Value *vals[10];
2734 int s, n;
2735 int k = 0;
2736 // get non-replaced sources of ri
2737 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
2738 vals[k++] = ri->getSrc(s);
2739 n = s;
2740 // get replaced sources of st
2741 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
2742 vals[k++] = st->getSrc(s);
2743 // skip replaced sources of ri
2744 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
2745 // get non-replaced sources after values covered by st
2746 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
2747 vals[k++] = ri->getSrc(s);
2748 assert((unsigned int)k <= ARRAY_SIZE(vals));
2749 for (s = 0; s < k; ++s)
2750 st->setSrc(s + 1, vals[s]);
2751 st->setSrc(0, ri->getSrc(0));
2752 } else
2753 if (endR > endS) {
2754 int j, s;
2755 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
2756 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
2757 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
2758 st->setSrc(s++, ri->getSrc(j));
2759 }
2760 st->putExtraSources(0, extra);
2761
2762 delete_Instruction(prog, rec->insn);
2763
2764 rec->insn = st;
2765 rec->offset = st->getSrc(0)->reg.data.offset;
2766
2767 st->setType(typeOfSize(rec->size));
2768
2769 return true;
2770 }
2771
2772 bool
2773 MemoryOpt::Record::overlaps(const Instruction *ldst) const
2774 {
2775 Record that;
2776 that.set(ldst);
2777
2778 if (this->fileIndex != that.fileIndex)
2779 return false;
2780
2781 if (this->rel[0] || that.rel[0])
2782 return this->base == that.base;
2783 return
2784 (this->offset < that.offset + that.size) &&
2785 (this->offset + this->size > that.offset);
2786 }
2787
2788 // A store that affects the result of @ld must not be eliminated once we
2789 // find later stores to the same location, and it may no longer be merged
2790 // with later stores.
2791 // The stored value can, however, still be used to determine the value
2792 // returned by future loads.
2793 void
2794 MemoryOpt::lockStores(Instruction *const ld)
2795 {
2796 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
2797 if (!r->locked && r->overlaps(ld))
2798 r->locked = true;
2799 }
2800
2801 // Prior loads from the location of @st are no longer valid.
2802 // Stores to the location of @st may no longer be used to derive
2803 // the value stored there, nor be coalesced into later stores.
2804 void
2805 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
2806 {
2807 if (st)
2808 f = st->src(0).getFile();
2809
2810 for (Record *r = loads[f]; r; r = r->next)
2811 if (!st || r->overlaps(st))
2812 r->unlink(&loads[f]);
2813
2814 for (Record *r = stores[f]; r; r = r->next)
2815 if (!st || r->overlaps(st))
2816 r->unlink(&stores[f]);
2817 }
2818
2819 bool
2820 MemoryOpt::visit(BasicBlock *bb)
2821 {
2822 bool ret = runOpt(bb);
2823 // Run again; one pass won't combine four 32-bit ld/st into a single
2824 // 128-bit ld/st where 96-bit memory operations are forbidden.
2825 if (ret)
2826 ret = runOpt(bb);
2827 return ret;
2828 }
2829
2830 bool
2831 MemoryOpt::runOpt(BasicBlock *bb)
2832 {
2833 Instruction *ldst, *next;
2834 Record *rec;
2835 bool isAdjacent = true;
2836
2837 for (ldst = bb->getEntry(); ldst; ldst = next) {
2838 bool keep = true;
2839 bool isLoad = true;
2840 next = ldst->next;
2841
2842 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
2843 if (ldst->isDead()) {
2844 // might have been produced by earlier optimization
2845 delete_Instruction(prog, ldst);
2846 continue;
2847 }
2848 } else
2849 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
2850 if (typeSizeof(ldst->dType) == 4 &&
2851 ldst->src(1).getFile() == FILE_GPR &&
2852 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
2853 delete_Instruction(prog, ldst);
2854 continue;
2855 }
2856 isLoad = false;
2857 } else {
2858 // TODO: maybe have all fixed ops act as barrier ?
2859 if (ldst->op == OP_CALL ||
2860 ldst->op == OP_BAR ||
2861 ldst->op == OP_MEMBAR) {
2862 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2863 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2864 purgeRecords(NULL, FILE_MEMORY_SHARED);
2865 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2866 } else
2867 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
2868 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
2869 purgeRecords(NULL, FILE_MEMORY_LOCAL);
2870 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
2871 purgeRecords(NULL, FILE_MEMORY_SHARED);
2872 } else {
2873 purgeRecords(NULL, ldst->src(0).getFile());
2874 }
2875 } else
2876 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
2877 purgeRecords(NULL, FILE_SHADER_OUTPUT);
2878 }
2879 continue;
2880 }
2881 if (ldst->getPredicate()) // TODO: handle predicated ld/st
2882 continue;
2883 if (ldst->perPatch) // TODO: create separate per-patch lists
2884 continue;
2885
2886 if (isLoad) {
2887 DataFile file = ldst->src(0).getFile();
2888
2889 // if ld l[]/g[], look for a previous store to eliminate the reload
2890 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
2891 // TODO: shared memory ?
2892 rec = findRecord(ldst, false, isAdjacent);
2893 if (rec && !isAdjacent)
2894 keep = !replaceLdFromSt(ldst, rec);
2895 }
2896
2897 // or look for ld from the same location and replace this one
2898 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
2899 if (rec) {
2900 if (!isAdjacent)
2901 keep = !replaceLdFromLd(ldst, rec);
2902 else
2903 // or combine a previous load with this one
2904 keep = !combineLd(rec, ldst);
2905 }
2906 if (keep)
2907 lockStores(ldst);
2908 } else {
2909 rec = findRecord(ldst, false, isAdjacent);
2910 if (rec) {
2911 if (!isAdjacent)
2912 keep = !replaceStFromSt(ldst, rec);
2913 else
2914 keep = !combineSt(rec, ldst);
2915 }
2916 if (keep)
2917 purgeRecords(ldst, DATA_FILE_COUNT);
2918 }
2919 if (keep)
2920 addRecord(ldst);
2921 }
2922 reset();
2923
2924 return true;
2925 }
2926
2927 // =============================================================================
2928
2929 // Turn control flow into predicated instructions (after register allocation !).
2930 // TODO:
2931 // Could move this to before register allocation on NVC0 and also handle nested
2932 // constructs.
2933 class FlatteningPass : public Pass
2934 {
2935 private:
2936 virtual bool visit(Function *);
2937 virtual bool visit(BasicBlock *);
2938
2939 bool tryPredicateConditional(BasicBlock *);
2940 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
2941 void tryPropagateBranch(BasicBlock *);
2942 inline bool isConstantCondition(Value *pred);
2943 inline bool mayPredicate(const Instruction *, const Value *pred) const;
2944 inline void removeFlow(Instruction *);
2945
2946 uint8_t gpr_unit;
2947 };
2948
2949 bool
2950 FlatteningPass::isConstantCondition(Value *pred)
2951 {
2952 Instruction *insn = pred->getUniqueInsn();
2953 assert(insn);
2954 if (insn->op != OP_SET || insn->srcExists(2))
2955 return false;
2956
2957 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
2958 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
2959 DataFile file;
2960 if (ld) {
2961 if (ld->op != OP_MOV && ld->op != OP_LOAD)
2962 return false;
2963 if (ld->src(0).isIndirect(0))
2964 return false;
2965 file = ld->src(0).getFile();
2966 } else {
2967 file = insn->src(s).getFile();
2968 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
2969 // in register "units", which can vary between targets.
2970 if (file == FILE_GPR) {
2971 Value *v = insn->getSrc(s);
2972 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
2973 int units = bytes >> gpr_unit;
2974 if (units > prog->maxGPR)
2975 file = FILE_IMMEDIATE;
2976 }
2977 }
2978 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
2979 return false;
2980 }
2981 return true;
2982 }
2983
2984 void
2985 FlatteningPass::removeFlow(Instruction *insn)
2986 {
2987 FlowInstruction *term = insn ? insn->asFlow() : NULL;
2988 if (!term)
2989 return;
2990 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
2991
2992 if (term->op == OP_BRA) {
2993 // TODO: this might get more difficult when we get arbitrary BRAs
2994 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
2995 return;
2996 } else
2997 if (term->op != OP_JOIN)
2998 return;
2999
3000 Value *pred = term->getPredicate();
3001
3002 delete_Instruction(prog, term);
3003
3004 if (pred && pred->refCount() == 0) {
3005 Instruction *pSet = pred->getUniqueInsn();
3006 pred->join->reg.data.id = -1; // deallocate
3007 if (pSet->isDead())
3008 delete_Instruction(prog, pSet);
3009 }
3010 }
3011
3012 void
3013 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3014 {
3015 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3016 if (i->isNop())
3017 continue;
3018 assert(!i->getPredicate());
3019 i->setPredicate(cc, pred);
3020 }
3021 removeFlow(bb->getExit());
3022 }
3023
3024 bool
3025 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3026 {
3027 if (insn->isPseudo())
3028 return true;
3029 // TODO: calls where we don't know which registers are modified
3030
3031 if (!prog->getTarget()->mayPredicate(insn, pred))
3032 return false;
3033 for (int d = 0; insn->defExists(d); ++d)
3034 if (insn->getDef(d)->equals(pred))
3035 return false;
3036 return true;
3037 }
3038
3039 // If we jump to BRA/RET/EXIT, replace the jump with it.
3040 // NOTE: We do not update the CFG anymore here !
3041 //
3042 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3043 // BB:0
3044 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3045 // BB1:
3046 // bra BB:3
3047 // BB2:
3048 // ...
3049 // BB3:
3050 // ...
3051 void
3052 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3053 {
3054 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3055 BasicBlock *bf = i->asFlow()->target.bb;
3056
3057 if (bf->getInsnCount() != 1)
3058 continue;
3059
3060 FlowInstruction *bra = i->asFlow();
3061 FlowInstruction *rep = bf->getExit()->asFlow();
3062
3063 if (!rep || rep->getPredicate())
3064 continue;
3065 if (rep->op != OP_BRA &&
3066 rep->op != OP_JOIN &&
3067 rep->op != OP_EXIT)
3068 continue;
3069
3070 // TODO: If there are multiple branches to @rep, only the first would
3071 // be replaced, so only remove them after this pass is done ?
3072 // Also, need to check all incident blocks for fall-through exits and
3073 // add the branch there.
3074 bra->op = rep->op;
3075 bra->target.bb = rep->target.bb;
3076 if (bf->cfg.incidentCount() == 1)
3077 bf->remove(rep);
3078 }
3079 }
3080
3081 bool
3082 FlatteningPass::visit(Function *fn)
3083 {
3084 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3085
3086 return true;
3087 }
3088
3089 bool
3090 FlatteningPass::visit(BasicBlock *bb)
3091 {
3092 if (tryPredicateConditional(bb))
3093 return true;
3094
3095 // try to attach join to previous instruction
3096 if (prog->getTarget()->hasJoin) {
3097 Instruction *insn = bb->getExit();
3098 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3099 insn = insn->prev;
3100 if (insn && !insn->getPredicate() &&
3101 !insn->asFlow() &&
3102 insn->op != OP_DISCARD &&
3103 insn->op != OP_TEXBAR &&
3104 !isTextureOp(insn->op) && // probably just nve4
3105 !isSurfaceOp(insn->op) && // not confirmed
3106 insn->op != OP_LINTERP && // probably just nve4
3107 insn->op != OP_PINTERP && // probably just nve4
3108 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3109 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3110 !insn->isNop()) {
3111 insn->join = 1;
3112 bb->remove(bb->getExit());
3113 return true;
3114 }
3115 }
3116 }
3117
3118 tryPropagateBranch(bb);
3119
3120 return true;
3121 }
3122
3123 bool
3124 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3125 {
3126 BasicBlock *bL = NULL, *bR = NULL;
3127 unsigned int nL = 0, nR = 0, limit = 12;
3128 Instruction *insn;
3129 unsigned int mask;
3130
3131 mask = bb->initiatesSimpleConditional();
3132 if (!mask)
3133 return false;
3134
3135 assert(bb->getExit());
3136 Value *pred = bb->getExit()->getPredicate();
3137 assert(pred);
3138
3139 if (isConstantCondition(pred))
3140 limit = 4;
3141
3142 Graph::EdgeIterator ei = bb->cfg.outgoing();
3143
3144 if (mask & 1) {
3145 bL = BasicBlock::get(ei.getNode());
3146 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3147 if (!mayPredicate(insn, pred))
3148 return false;
3149 if (nL > limit)
3150 return false; // too long, do a real branch
3151 }
3152 ei.next();
3153
3154 if (mask & 2) {
3155 bR = BasicBlock::get(ei.getNode());
3156 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3157 if (!mayPredicate(insn, pred))
3158 return false;
3159 if (nR > limit)
3160 return false; // too long, do a real branch
3161 }
3162
3163 if (bL)
3164 predicateInstructions(bL, pred, bb->getExit()->cc);
3165 if (bR)
3166 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3167
3168 if (bb->joinAt) {
3169 bb->remove(bb->joinAt);
3170 bb->joinAt = NULL;
3171 }
3172 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3173
3174 // remove potential join operations at the end of the conditional
3175 if (prog->getTarget()->joinAnterior) {
3176 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3177 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3178 removeFlow(bb->getEntry());
3179 }
3180
3181 return true;
3182 }
3183
3184 // =============================================================================
3185
3186 // Fold Immediate into MAD; must be done after register allocation due to
3187 // constraint SDST == SSRC2
3188 // TODO:
3189 // Does NVC0+ have other situations where this pass makes sense?
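// e.g. (sketch, float case with hypothetical registers): given
//   mov b32 $r2 0x3f800000
//   mad f32 $r0 $r1 $r2 $r0
// the immediate is folded in directly (SDST == SSRC2 already holds):
//   mad f32 $r0 $r1 0x3f800000 $r0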
3190 class NV50PostRaConstantFolding : public Pass
3191 {
3192 private:
3193 virtual bool visit(BasicBlock *);
3194 };
3195
3196 static bool
3197 post_ra_dead(Instruction *i)
3198 {
3199 for (int d = 0; i->defExists(d); ++d)
3200 if (i->getDef(d)->refCount())
3201 return false;
3202 return true;
3203 }
3204
3205 bool
3206 NV50PostRaConstantFolding::visit(BasicBlock *bb)
3207 {
3208 Value *vtmp;
3209 Instruction *def;
3210
3211 for (Instruction *i = bb->getFirst(); i; i = i->next) {
3212 switch (i->op) {
3213 case OP_MAD:
3214 if (i->def(0).getFile() != FILE_GPR ||
3215 i->src(0).getFile() != FILE_GPR ||
3216 i->src(1).getFile() != FILE_GPR ||
3217 i->src(2).getFile() != FILE_GPR ||
3218 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3219 break;
3220
3221 if (i->getDef(0)->reg.data.id >= 64 ||
3222 i->getSrc(0)->reg.data.id >= 64)
3223 break;
3224
3225 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3226 break;
3227
3228 if (i->getPredicate())
3229 break;
3230
3231 def = i->getSrc(1)->getInsn();
3232 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3233 def = def->getSrc(0)->getInsn();
3234 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3235 vtmp = i->getSrc(1);
3236 if (isFloatType(i->sType)) {
3237 i->setSrc(1, def->getSrc(0));
3238 } else {
3239 ImmediateValue val;
3240 bool ret = def->src(0).getImmediate(val);
3241 assert(ret);
3242 if (i->getSrc(1)->reg.data.id & 1)
3243 val.reg.data.u32 >>= 16;
3244 val.reg.data.u32 &= 0xffff;
3245 i->setSrc(1, new_ImmediateValue(bb->getProgram(), val.reg.data.u32));
3246 }
3247
3248 /* There's no post-RA dead code elimination, so do it here
3249 * XXX: if we add more code-removing post-RA passes, we might
3250 * want to create a post-RA dead-code elim pass */
3251 if (post_ra_dead(vtmp->getInsn())) {
3252 Value *src = vtmp->getInsn()->getSrc(0);
3253 // Careful -- splits will have already been removed from the
3254 // functions. Don't double-delete.
3255 if (vtmp->getInsn()->bb)
3256 delete_Instruction(prog, vtmp->getInsn());
3257 if (src->getInsn() && post_ra_dead(src->getInsn()))
3258 delete_Instruction(prog, src->getInsn());
3259 }
3260
3261 break;
3262 }
3263 break;
3264 default:
3265 break;
3266 }
3267 }
3268
3269 return true;
3270 }
3271
3272 // =============================================================================
3273
3274 // Common subexpression elimination. Stupid O(n^2) implementation.
3275 class LocalCSE : public Pass
3276 {
3277 private:
3278 virtual bool visit(BasicBlock *);
3279
3280 inline bool tryReplace(Instruction **, Instruction *);
3281
3282 DLList ops[OP_LAST + 1];
3283 };
3284
3285 class GlobalCSE : public Pass
3286 {
3287 private:
3288 virtual bool visit(BasicBlock *);
3289 };
3290
3291 bool
3292 Instruction::isActionEqual(const Instruction *that) const
3293 {
3294 if (this->op != that->op ||
3295 this->dType != that->dType ||
3296 this->sType != that->sType)
3297 return false;
3298 if (this->cc != that->cc)
3299 return false;
3300
3301 if (this->asTex()) {
3302 if (memcmp(&this->asTex()->tex,
3303 &that->asTex()->tex,
3304 sizeof(this->asTex()->tex)))
3305 return false;
3306 } else
3307 if (this->asCmp()) {
3308 if (this->asCmp()->setCond != that->asCmp()->setCond)
3309 return false;
3310 } else
3311 if (this->asFlow()) {
3312 return false;
3313 } else {
3314 if (this->ipa != that->ipa ||
3315 this->lanes != that->lanes ||
3316 this->perPatch != that->perPatch)
3317 return false;
3318 if (this->postFactor != that->postFactor)
3319 return false;
3320 }
3321
3322 if (this->subOp != that->subOp ||
3323 this->saturate != that->saturate ||
3324 this->rnd != that->rnd ||
3325 this->ftz != that->ftz ||
3326 this->dnz != that->dnz ||
3327 this->cache != that->cache ||
3328 this->mask != that->mask)
3329 return false;
3330
3331 return true;
3332 }
3333
3334 bool
3335 Instruction::isResultEqual(const Instruction *that) const
3336 {
3337 unsigned int d, s;
3338
3339 // NOTE: location of discard only affects tex with liveOnly and quadops
3340 if (!this->defExists(0) && this->op != OP_DISCARD)
3341 return false;
3342
3343 if (!isActionEqual(that))
3344 return false;
3345
3346 if (this->predSrc != that->predSrc)
3347 return false;
3348
3349 for (d = 0; this->defExists(d); ++d) {
3350 if (!that->defExists(d) ||
3351 !this->getDef(d)->equals(that->getDef(d), false))
3352 return false;
3353 }
3354 if (that->defExists(d))
3355 return false;
3356
3357 for (s = 0; this->srcExists(s); ++s) {
3358 if (!that->srcExists(s))
3359 return false;
3360 if (this->src(s).mod != that->src(s).mod)
3361 return false;
3362 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3363 return false;
3364 }
3365 if (that->srcExists(s))
3366 return false;
3367
3368 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3369 switch (src(0).getFile()) {
3370 case FILE_MEMORY_CONST:
3371 case FILE_SHADER_INPUT:
3372 return true;
3373 case FILE_SHADER_OUTPUT:
3374 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3375 default:
3376 return false;
3377 }
3378 }
3379
3380 return true;
3381 }
3382
3383 // pull through common expressions from different in-blocks
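// e.g. (sketch): if every predecessor block computes its own identical
//   add f32 $rX $ra $rb
// and each result only feeds this block's PHI, one of the adds is moved into
// this block to replace the PHI, and the remaining copies become dead.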
3384 bool
3385 GlobalCSE::visit(BasicBlock *bb)
3386 {
3387 Instruction *phi, *next, *ik;
3388 int s;
3389
3390 // TODO: maybe do this with OP_UNION, too
3391
3392 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3393 next = phi->next;
3394 if (phi->getSrc(0)->refCount() > 1)
3395 continue;
3396 ik = phi->getSrc(0)->getInsn();
3397 if (!ik)
3398 continue; // probably a function input
3399 if (ik->defCount(0xff) > 1)
3400 continue; // too painful to check if we can really push this forward
3401 for (s = 1; phi->srcExists(s); ++s) {
3402 if (phi->getSrc(s)->refCount() > 1)
3403 break;
3404 if (!phi->getSrc(s)->getInsn() ||
3405 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3406 break;
3407 }
3408 if (!phi->srcExists(s)) {
3409 Instruction *entry = bb->getEntry();
3410 ik->bb->remove(ik);
3411 if (!entry || entry->op != OP_JOIN)
3412 bb->insertHead(ik);
3413 else
3414 bb->insertAfter(entry, ik);
3415 ik->setDef(0, phi->getDef(0));
3416 delete_Instruction(prog, phi);
3417 }
3418 }
3419
3420 return true;
3421 }
3422
3423 bool
3424 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3425 {
3426 Instruction *old = *ptr;
3427
3428 // TODO: maybe relax this later (causes trouble with OP_UNION)
3429 if (i->isPredicated())
3430 return false;
3431
3432 if (!old->isResultEqual(i))
3433 return false;
3434
3435 for (int d = 0; old->defExists(d); ++d)
3436 old->def(d).replace(i->getDef(d), false);
3437 delete_Instruction(prog, old);
3438 *ptr = NULL;
3439 return true;
3440 }
3441
3442 bool
3443 LocalCSE::visit(BasicBlock *bb)
3444 {
3445 unsigned int replaced;
3446
3447 do {
3448 Instruction *ir, *next;
3449
3450 replaced = 0;
3451
3452 // will need to know the order of instructions
3453 int serial = 0;
3454 for (ir = bb->getFirst(); ir; ir = ir->next)
3455 ir->serial = serial++;
3456
3457 for (ir = bb->getFirst(); ir; ir = next) {
3458 int s;
3459 Value *src = NULL;
3460
3461 next = ir->next;
3462
3463 if (ir->fixed) {
3464 ops[ir->op].insert(ir);
3465 continue;
3466 }
3467
3468 for (s = 0; ir->srcExists(s); ++s)
3469 if (ir->getSrc(s)->asLValue())
3470 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3471 src = ir->getSrc(s);
3472
3473 if (src) {
3474 for (Value::UseIterator it = src->uses.begin();
3475 it != src->uses.end(); ++it) {
3476 Instruction *ik = (*it)->getInsn();
3477 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3478 if (tryReplace(&ir, ik))
3479 break;
3480 }
3481 } else {
3482 DLLIST_FOR_EACH(&ops[ir->op], iter)
3483 {
3484 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3485 if (tryReplace(&ir, ik))
3486 break;
3487 }
3488 }
3489
3490 if (ir)
3491 ops[ir->op].insert(ir);
3492 else
3493 ++replaced;
3494 }
3495 for (unsigned int i = 0; i <= OP_LAST; ++i)
3496 ops[i].clear();
3497
3498 } while (replaced);
3499
3500 return true;
3501 }
3502
3503 // =============================================================================
3504
3505 // Remove computations of unused values.
3506 class DeadCodeElim : public Pass
3507 {
3508 public:
3509 bool buryAll(Program *);
3510
3511 private:
3512 virtual bool visit(BasicBlock *);
3513
3514 void checkSplitLoad(Instruction *ld); // for partially dead loads
3515
3516 unsigned int deadCount;
3517 };
3518
3519 bool
3520 DeadCodeElim::buryAll(Program *prog)
3521 {
3522 do {
3523 deadCount = 0;
3524 if (!this->run(prog, false, false))
3525 return false;
3526 } while (deadCount);
3527
3528 return true;
3529 }
3530
3531 bool
3532 DeadCodeElim::visit(BasicBlock *bb)
3533 {
3534 Instruction *prev;
3535
3536 for (Instruction *i = bb->getExit(); i; i = prev) {
3537 prev = i->prev;
3538 if (i->isDead()) {
3539 ++deadCount;
3540 delete_Instruction(prog, i);
3541 } else
3542 if (i->defExists(1) &&
3543 i->subOp == 0 &&
3544 (i->op == OP_VFETCH || i->op == OP_LOAD)) {
3545 checkSplitLoad(i);
3546 } else
3547 if (i->defExists(0) && !i->getDef(0)->refCount()) {
3548 if (i->op == OP_ATOM ||
3549 i->op == OP_SUREDP ||
3550 i->op == OP_SUREDB) {
3551 i->setDef(0, NULL);
3552 if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
3553 i->cache = CACHE_CV;
3554 i->op = OP_STORE;
3555 i->subOp = 0;
3556 }
3557 } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
3558 i->setDef(0, i->getDef(1));
3559 i->setDef(1, NULL);
3560 }
3561 }
3562 }
3563 return true;
3564 }
3565
3566 // Each load can go into up to 4 destinations, any of which might potentially
3567 // be dead (i.e. a hole). These can always be split into 2 loads, independent
3568 // of where the holes are. We find the first contiguous region, put it into
3569 // the first load, and then put the second contiguous region into the second
3570 // load. There can be at most 2 contiguous regions.
3571 //
3572 // Note that there are some restrictions; for example, it's not possible to do
3573 // a 64-bit load that's not 64-bit aligned, so such a load has to be split
3574 // up. Also hardware doesn't support 96-bit loads, so those also have to be
3575 // split into a 64-bit and 32-bit load.
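// e.g. (illustrative, notation approximate): a 128-bit load whose second
// component is dead,
//   ld b128 { $r0 (dead) $r2 $r3 } l[0x0]
// is rewritten as
//   ld u32 $r0 l[0x0]
//   ld b64 { $r2 $r3 } l[0x8]
// the first load keeps the leading live region, the second load the rest.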
3576 void
3577 DeadCodeElim::checkSplitLoad(Instruction *ld1)
3578 {
3579 Instruction *ld2 = NULL; // can get at most 2 loads
3580 Value *def1[4];
3581 Value *def2[4];
3582 int32_t addr1, addr2;
3583 int32_t size1, size2;
3584 int d, n1, n2;
3585 uint32_t mask = 0xffffffff;
3586
3587 for (d = 0; ld1->defExists(d); ++d)
3588 if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
3589 mask &= ~(1 << d);
3590 if (mask == 0xffffffff)
3591 return;
3592
3593 addr1 = ld1->getSrc(0)->reg.data.offset;
3594 n1 = n2 = 0;
3595 size1 = size2 = 0;
3596
3597 // Compute address/width for first load
3598 for (d = 0; ld1->defExists(d); ++d) {
3599 if (mask & (1 << d)) {
3600 if (size1 && (addr1 & 0x7))
3601 break;
3602 def1[n1] = ld1->getDef(d);
3603 size1 += def1[n1++]->reg.size;
3604 } else
3605 if (!n1) {
3606 addr1 += ld1->getDef(d)->reg.size;
3607 } else {
3608 break;
3609 }
3610 }
3611
3612 // Scale back the size of the first load until it can be loaded. This
3613 // typically happens for TYPE_B96 loads.
3614 while (n1 &&
3615 !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
3616 typeOfSize(size1))) {
3617 size1 -= def1[--n1]->reg.size;
3618 d--;
3619 }
3620
3621 // Compute address/width for second load
3622 for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
3623 if (mask & (1 << d)) {
3624 assert(!size2 || !(addr2 & 0x7));
3625 def2[n2] = ld1->getDef(d);
3626 size2 += def2[n2++]->reg.size;
3627 } else if (!n2) {
3628 assert(!n2);
3629 addr2 += ld1->getDef(d)->reg.size;
3630 } else {
3631 break;
3632 }
3633 }
3634
3635 // Make sure that we've processed all the values
3636 for (; ld1->defExists(d); ++d)
3637 assert(!(mask & (1 << d)));
3638
3639 updateLdStOffset(ld1, addr1, func);
3640 ld1->setType(typeOfSize(size1));
3641 for (d = 0; d < 4; ++d)
3642 ld1->setDef(d, (d < n1) ? def1[d] : NULL);
3643
3644 if (!n2)
3645 return;
3646
3647 ld2 = cloneShallow(func, ld1);
3648 updateLdStOffset(ld2, addr2, func);
3649 ld2->setType(typeOfSize(size2));
3650 for (d = 0; d < 4; ++d)
3651 ld2->setDef(d, (d < n2) ? def2[d] : NULL);
3652
3653 ld1->bb->insertAfter(ld1, ld2);
3654 }
3655
3656 // =============================================================================
3657
3658 #define RUN_PASS(l, n, f) \
3659 if (level >= (l)) { \
3660 if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
3661 INFO("PEEPHOLE: %s\n", #n); \
3662 n pass; \
3663 if (!pass.f(this)) \
3664 return false; \
3665 }
3666
3667 bool
3668 Program::optimizeSSA(int level)
3669 {
3670 RUN_PASS(1, DeadCodeElim, buryAll);
3671 RUN_PASS(1, CopyPropagation, run);
3672 RUN_PASS(1, MergeSplits, run);
3673 RUN_PASS(2, GlobalCSE, run);
3674 RUN_PASS(1, LocalCSE, run);
3675 RUN_PASS(2, AlgebraicOpt, run);
3676 RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
3677 RUN_PASS(1, ConstantFolding, foldAll);
3678 RUN_PASS(2, LateAlgebraicOpt, run);
3679 RUN_PASS(1, Split64BitOpPreRA, run);
3680 RUN_PASS(1, LoadPropagation, run);
3681 RUN_PASS(1, IndirectPropagation, run);
3682 RUN_PASS(2, MemoryOpt, run);
3683 RUN_PASS(2, LocalCSE, run);
3684 RUN_PASS(0, DeadCodeElim, buryAll);
3685
3686 return true;
3687 }
3688
3689 bool
3690 Program::optimizePostRA(int level)
3691 {
3692 RUN_PASS(2, FlatteningPass, run);
3693 if (getTarget()->getChipset() < 0xc0)
3694 RUN_PASS(2, NV50PostRaConstantFolding, run);
3695
3696 return true;
3697 }
3698
3699 }