Merge commit 'origin/gallium-0.1' into gallium-0.2
[mesa.git] / src / gallium / auxiliary / rtasm / rtasm_ppc_spe.c
1 /*
2 * (C) Copyright IBM Corporation 2008
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 /**
26 * \file
27 * Real-time assembly generation interface for Cell B.E. SPEs.
28 *
29 * \author Ian Romanick <idr@us.ibm.com>
30 * \author Brian Paul
31 */
32
33
34 #include <stdio.h>
35 #include "pipe/p_compiler.h"
36 #include "util/u_memory.h"
37 #include "rtasm_ppc_spe.h"
38
39
40 #ifdef GALLIUM_CELL
41 /**
42 * SPE instruction types
43 *
44 * There are 6 primary instruction encodings used on the Cell's SPEs. Each of
45 * the following unions encodes one type.
46 *
47 * \bug
48 * If, at some point, we start generating SPE code from a little-endian host
49 * these unions will not work.
50 */
51 /*@{*/
/**
 * Encode one output register with two input registers.
 * RR form: 11-bit opcode plus three 7-bit register fields.
 * Fields are listed low-order first; this layout is only correct on a
 * big-endian host (see the \bug note above).
 */
union spe_inst_RR {
   uint32_t bits;
   struct {
      unsigned op:11;   /* primary opcode */
      unsigned rB:7;    /* second source register */
      unsigned rA:7;    /* first source register */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
64
65
/**
 * Encode one output register with three input registers.
 * RRR form: 4-bit opcode plus four 7-bit register fields.
 */
union spe_inst_RRR {
   uint32_t bits;
   struct {
      unsigned op:4;    /* primary opcode */
      unsigned rT:7;    /* target (result) register */
      unsigned rB:7;    /* second source register */
      unsigned rA:7;    /* first source register */
      unsigned rC:7;    /* third source register */
   } inst;
};
79
80
/**
 * Encode one output register with one input reg. and a 7-bit signed immed.
 */
union spe_inst_RI7 {
   uint32_t bits;
   struct {
      unsigned op:11;   /* primary opcode */
      unsigned i7:7;    /* 7-bit signed immediate */
      unsigned rA:7;    /* source register */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
93
94
/**
 * Encode one output register with one input reg. and an 8-bit signed immed.
 */
union spe_inst_RI8 {
   uint32_t bits;
   struct {
      unsigned op:10;   /* primary opcode */
      unsigned i8:8;    /* 8-bit signed immediate */
      unsigned rA:7;    /* source register */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
107
108
/**
 * Encode one output register with one input reg. and a 10-bit signed immed.
 */
union spe_inst_RI10 {
   uint32_t bits;
   struct {
      unsigned op:8;    /* primary opcode */
      unsigned i10:10;  /* 10-bit signed immediate */
      unsigned rA:7;    /* source register */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
121
122
/**
 * Encode one output register with a 16-bit signed immediate.
 */
union spe_inst_RI16 {
   uint32_t bits;
   struct {
      unsigned op:9;    /* primary opcode */
      unsigned i16:16;  /* 16-bit signed immediate */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
134
135
/**
 * Encode one output register with an 18-bit signed immediate.
 */
union spe_inst_RI18 {
   uint32_t bits;
   struct {
      unsigned op:7;    /* primary opcode */
      unsigned i18:18;  /* 18-bit signed immediate */
      unsigned rT:7;    /* target (result) register */
   } inst;
};
147 /*@}*/
148
149
150 static void
151 indent(const struct spe_function *p)
152 {
153 int i;
154 for (i = 0; i < p->indent; i++) {
155 putchar(' ');
156 }
157 }
158
159
/**
 * Strip the leading "spe_" (4 chars) from an emitter function name,
 * leaving the bare instruction mnemonic for disassembly output.
 */
static const char *
rem_prefix(const char *longname)
{
   return &longname[4];
}
165
166
167 static const char *
168 reg_name(int reg)
169 {
170 switch (reg) {
171 case SPE_REG_SP:
172 return "$sp";
173 case SPE_REG_RA:
174 return "$lr";
175 default:
176 {
177 /* cycle through four buffers to handle multiple calls per printf */
178 static char buf[4][10];
179 static int b = 0;
180 b = (b + 1) % 4;
181 sprintf(buf[b], "$%d", reg);
182 return buf[b];
183 }
184 }
185 }
186
187
188 static void
189 emit_instruction(struct spe_function *p, uint32_t inst_bits)
190 {
191 if (!p->store)
192 return; /* out of memory, drop the instruction */
193
194 if (p->num_inst == p->max_inst) {
195 /* allocate larger buffer */
196 uint32_t *newbuf;
197 p->max_inst *= 2; /* 2x larger */
198 newbuf = align_malloc(p->max_inst * SPE_INST_SIZE, 16);
199 if (newbuf) {
200 memcpy(newbuf, p->store, p->num_inst * SPE_INST_SIZE);
201 }
202 align_free(p->store);
203 p->store = newbuf;
204 if (!p->store) {
205 /* out of memory */
206 p->num_inst = 0;
207 return;
208 }
209 }
210
211 p->store[p->num_inst++] = inst_bits;
212 }
213
214
215
216 static void emit_RR(struct spe_function *p, unsigned op, unsigned rT,
217 unsigned rA, unsigned rB, const char *name)
218 {
219 union spe_inst_RR inst;
220 inst.inst.op = op;
221 inst.inst.rB = rB;
222 inst.inst.rA = rA;
223 inst.inst.rT = rT;
224 emit_instruction(p, inst.bits);
225 if (p->print) {
226 indent(p);
227 printf("%s\t%s, %s, %s\n",
228 rem_prefix(name), reg_name(rT), reg_name(rA), reg_name(rB));
229 }
230 }
231
232
233 static void emit_RRR(struct spe_function *p, unsigned op, unsigned rT,
234 unsigned rA, unsigned rB, unsigned rC, const char *name)
235 {
236 union spe_inst_RRR inst;
237 inst.inst.op = op;
238 inst.inst.rT = rT;
239 inst.inst.rB = rB;
240 inst.inst.rA = rA;
241 inst.inst.rC = rC;
242 emit_instruction(p, inst.bits);
243 if (p->print) {
244 indent(p);
245 printf("%s\t%s, %s, %s, %s\n", rem_prefix(name), reg_name(rT),
246 reg_name(rA), reg_name(rB), reg_name(rC));
247 }
248 }
249
250
251 static void emit_RI7(struct spe_function *p, unsigned op, unsigned rT,
252 unsigned rA, int imm, const char *name)
253 {
254 union spe_inst_RI7 inst;
255 inst.inst.op = op;
256 inst.inst.i7 = imm;
257 inst.inst.rA = rA;
258 inst.inst.rT = rT;
259 emit_instruction(p, inst.bits);
260 if (p->print) {
261 indent(p);
262 printf("%s\t%s, %s, 0x%x\n",
263 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
264 }
265 }
266
267
268
269 static void emit_RI8(struct spe_function *p, unsigned op, unsigned rT,
270 unsigned rA, int imm, const char *name)
271 {
272 union spe_inst_RI8 inst;
273 inst.inst.op = op;
274 inst.inst.i8 = imm;
275 inst.inst.rA = rA;
276 inst.inst.rT = rT;
277 emit_instruction(p, inst.bits);
278 if (p->print) {
279 indent(p);
280 printf("%s\t%s, %s, 0x%x\n",
281 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
282 }
283 }
284
285
286
287 static void emit_RI10(struct spe_function *p, unsigned op, unsigned rT,
288 unsigned rA, int imm, const char *name)
289 {
290 union spe_inst_RI10 inst;
291 inst.inst.op = op;
292 inst.inst.i10 = imm;
293 inst.inst.rA = rA;
294 inst.inst.rT = rT;
295 emit_instruction(p, inst.bits);
296 if (p->print) {
297 indent(p);
298 printf("%s\t%s, %s, 0x%x\n",
299 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
300 }
301 }
302
303
/** As emit_RI10, but assert the signed immediate fits in 10 bits first. */
static void emit_RI10s(struct spe_function *p, unsigned op, unsigned rT,
                       unsigned rA, int imm, const char *name)
{
   assert(imm >= -512);
   assert(imm <= 511);
   emit_RI10(p, op, rT, rA, imm, name);
}
312
313
314 static void emit_RI16(struct spe_function *p, unsigned op, unsigned rT,
315 int imm, const char *name)
316 {
317 union spe_inst_RI16 inst;
318 inst.inst.op = op;
319 inst.inst.i16 = imm;
320 inst.inst.rT = rT;
321 emit_instruction(p, inst.bits);
322 if (p->print) {
323 indent(p);
324 printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
325 }
326 }
327
328
329 static void emit_RI18(struct spe_function *p, unsigned op, unsigned rT,
330 int imm, const char *name)
331 {
332 union spe_inst_RI18 inst;
333 inst.inst.op = op;
334 inst.inst.i18 = imm;
335 inst.inst.rT = rT;
336 emit_instruction(p, inst.bits);
337 if (p->print) {
338 indent(p);
339 printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
340 }
341 }
342
343
/*
 * Boilerplate macros defining the body of one simple spe_*() emitter
 * function per instruction format.  Each takes the public function name
 * and the opcode; __FUNCTION__ supplies the mnemonic printed in the
 * disassembly (after rem_prefix() strips "spe_").
 */

/* RR form, no operands used (e.g. no-op style instructions) */
#define EMIT(_name, _op) \
void _name (struct spe_function *p) \
{ \
   emit_RR(p, _op, 0, 0, 0, __FUNCTION__); \
}

/* RR form, target register only */
#define EMIT_(_name, _op) \
void _name (struct spe_function *p, unsigned rT) \
{ \
   emit_RR(p, _op, rT, 0, 0, __FUNCTION__); \
}

/* RR form, target + one source register */
#define EMIT_R(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA) \
{ \
   emit_RR(p, _op, rT, rA, 0, __FUNCTION__); \
}

/* RR form, target + two source registers */
#define EMIT_RR(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, unsigned rB) \
{ \
   emit_RR(p, _op, rT, rA, rB, __FUNCTION__); \
}

/* RRR form, target + three source registers */
#define EMIT_RRR(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, unsigned rB, unsigned rC) \
{ \
   emit_RRR(p, _op, rT, rA, rB, rC, __FUNCTION__); \
}

/* RI7 form: target, source, 7-bit immediate */
#define EMIT_RI7(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI7(p, _op, rT, rA, imm, __FUNCTION__); \
}

/* RI8 form: note the immediate is encoded as (bias - imm) */
#define EMIT_RI8(_name, _op, bias) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI8(p, _op, rT, rA, bias - imm, __FUNCTION__); \
}

/* RI10 form: target, source, 10-bit immediate (unchecked) */
#define EMIT_RI10(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI10(p, _op, rT, rA, imm, __FUNCTION__); \
}

/* RI10 form with signed-range assertion on the immediate */
#define EMIT_RI10s(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI10s(p, _op, rT, rA, imm, __FUNCTION__); \
}

/* RI16 form: target + 16-bit immediate */
#define EMIT_RI16(_name, _op) \
void _name (struct spe_function *p, unsigned rT, int imm) \
{ \
   emit_RI16(p, _op, rT, imm, __FUNCTION__); \
}

/* RI18 form: target + 18-bit immediate */
#define EMIT_RI18(_name, _op) \
void _name (struct spe_function *p, unsigned rT, int imm) \
{ \
   emit_RI18(p, _op, rT, imm, __FUNCTION__); \
}

/* RI16 form with no target register (branches etc.) */
#define EMIT_I16(_name, _op) \
void _name (struct spe_function *p, int imm) \
{ \
   emit_RI16(p, _op, 0, imm, __FUNCTION__); \
}

/* Re-include the header now that the EMIT_* macros are defined: the
 * header presumably invokes them (X-macro style) to generate the
 * definitions of all the simple spe_*() emitters used below —
 * TODO confirm against rtasm_ppc_spe.h. */
#include "rtasm_ppc_spe.h"
417
418
419
420 /**
421 * Initialize an spe_function.
422 * \param code_size initial size of instruction buffer to allocate, in bytes.
423 * If zero, use a default.
424 */
425 void spe_init_func(struct spe_function *p, unsigned code_size)
426 {
427 unsigned int i;
428
429 if (!code_size)
430 code_size = 64;
431
432 p->num_inst = 0;
433 p->max_inst = code_size / SPE_INST_SIZE;
434 p->store = align_malloc(code_size, 16);
435
436 p->set_count = 0;
437 memset(p->regs, 0, SPE_NUM_REGS * sizeof(p->regs[0]));
438
439 /* Conservatively treat R0 - R2 and R80 - R127 as non-volatile.
440 */
441 p->regs[0] = p->regs[1] = p->regs[2] = 1;
442 for (i = 80; i <= 127; i++) {
443 p->regs[i] = 1;
444 }
445
446 p->print = false;
447 p->indent = 0;
448 }
449
450
451 void spe_release_func(struct spe_function *p)
452 {
453 assert(p->num_inst <= p->max_inst);
454 if (p->store != NULL) {
455 align_free(p->store);
456 }
457 p->store = NULL;
458 }
459
460
461 /** Return current code size in bytes. */
462 unsigned spe_code_size(const struct spe_function *p)
463 {
464 return p->num_inst * SPE_INST_SIZE;
465 }
466
467
468 /**
469 * Allocate a SPE register.
470 * \return register index or -1 if none left.
471 */
472 int spe_allocate_available_register(struct spe_function *p)
473 {
474 unsigned i;
475 for (i = 0; i < SPE_NUM_REGS; i++) {
476 if (p->regs[i] == 0) {
477 p->regs[i] = 1;
478 return i;
479 }
480 }
481
482 return -1;
483 }
484
485
486 /**
487 * Mark the given SPE register as "allocated".
488 */
489 int spe_allocate_register(struct spe_function *p, int reg)
490 {
491 assert(reg < SPE_NUM_REGS);
492 assert(p->regs[reg] == 0);
493 p->regs[reg] = 1;
494 return reg;
495 }
496
497
498 /**
499 * Mark the given SPE register as "unallocated". Note that this should
500 * only be used on registers allocated in the current register set; an
501 * assertion will fail if an attempt is made to deallocate a register
502 * allocated in an earlier register set.
503 */
504 void spe_release_register(struct spe_function *p, int reg)
505 {
506 assert(reg < SPE_NUM_REGS);
507 assert(p->regs[reg] == 1);
508
509 p->regs[reg] = 0;
510 }
511
512 /**
513 * Start a new set of registers. This can be called if
514 * it will be difficult later to determine exactly what
515 * registers were actually allocated during a code generation
516 * sequence, and you really just want to deallocate all of them.
517 */
518 void spe_allocate_register_set(struct spe_function *p)
519 {
520 unsigned int i;
521
522 /* Keep track of the set count. If it ever wraps around to 0,
523 * we're in trouble.
524 */
525 p->set_count++;
526 assert(p->set_count > 0);
527
528 /* Increment the allocation count of all registers currently
529 * allocated. Then any registers that are allocated in this set
530 * will be the only ones with a count of 1; they'll all be released
531 * when the register set is released.
532 */
533 for (i = 0; i < SPE_NUM_REGS; i++) {
534 if (p->regs[i] > 0)
535 p->regs[i]++;
536 }
537 }
538
539 void spe_release_register_set(struct spe_function *p)
540 {
541 unsigned int i;
542
543 /* If the set count drops below zero, we're in trouble. */
544 assert(p->set_count > 0);
545 p->set_count--;
546
547 /* Drop the allocation level of all registers. Any allocated
548 * during this register set will drop to 0 and then become
549 * available.
550 */
551 for (i = 0; i < SPE_NUM_REGS; i++) {
552 if (p->regs[i] > 0)
553 p->regs[i]--;
554 }
555 }
556
557
558 unsigned
559 spe_get_registers_used(const struct spe_function *p, ubyte used[])
560 {
561 unsigned i, num = 0;
562 /* only count registers in the range available to callers */
563 for (i = 2; i < 80; i++) {
564 if (p->regs[i]) {
565 used[num++] = i;
566 }
567 }
568 return num;
569 }
570
571
/** Enable/disable printing of a pseudo-disassembly as code is emitted. */
void
spe_print_code(struct spe_function *p, boolean enable)
{
   p->print = enable;
}
577
578
/** Adjust (relative, may be negative) the disassembly print indentation. */
void
spe_indent(struct spe_function *p, int spaces)
{
   p->indent += spaces;
}
584
585
586 void
587 spe_comment(struct spe_function *p, int rel_indent, const char *s)
588 {
589 if (p->print) {
590 p->indent += rel_indent;
591 indent(p);
592 p->indent -= rel_indent;
593 printf("# %s\n", s);
594 }
595 }
596
597
598 /**
599 * Load quad word.
600 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
601 */
602 void spe_lqd(struct spe_function *p, unsigned rT, unsigned rA, int offset)
603 {
604 const boolean pSave = p->print;
605
606 /* offset must be a multiple of 16 */
607 assert(offset % 16 == 0);
608 /* offset must fit in 10-bit signed int field, after shifting */
609 assert((offset >> 4) <= 511);
610 assert((offset >> 4) >= -512);
611
612 p->print = FALSE;
613 emit_RI10(p, 0x034, rT, rA, offset >> 4, "spe_lqd");
614 p->print = pSave;
615
616 if (p->print) {
617 indent(p);
618 printf("lqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
619 }
620 }
621
622
623 /**
624 * Store quad word.
625 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
626 */
627 void spe_stqd(struct spe_function *p, unsigned rT, unsigned rA, int offset)
628 {
629 const boolean pSave = p->print;
630
631 /* offset must be a multiple of 16 */
632 assert(offset % 16 == 0);
633 /* offset must fit in 10-bit signed int field, after shifting */
634 assert((offset >> 4) <= 511);
635 assert((offset >> 4) >= -512);
636
637 p->print = FALSE;
638 emit_RI10(p, 0x024, rT, rA, offset >> 4, "spe_stqd");
639 p->print = pSave;
640
641 if (p->print) {
642 indent(p);
643 printf("stqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
644 }
645 }
646
647
/**
 * For branch instructions:
 * \param d if 1, disable interrupts if branch is taken
 * \param e if 1, enable interrupts if branch is taken
 * If d and e are both zero, don't change interrupt status (right?)
 */
654
/** Branch Indirect to address in rA */
void spe_bi(struct spe_function *p, unsigned rA, int d, int e)
{
   /* d/e interrupt-control flags occupy bits 5 and 4 of the i7 field */
   emit_RI7(p, 0x1a8, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
}
660
/** Interrupt Return */
void spe_iret(struct spe_function *p, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x1aa, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
}
666
/** Branch indirect and set link on external data */
void spe_bisled(struct spe_function *p, unsigned rT, unsigned rA, int d,
                int e)
{
   emit_RI7(p, 0x1ab, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
673
/** Branch indirect and set link.  Save PC in rT, jump to rA. */
void spe_bisl(struct spe_function *p, unsigned rT, unsigned rA, int d,
              int e)
{
   emit_RI7(p, 0x1a9, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
680
/** Branch indirect if zero word.  If rT.word[0]==0, jump to rA. */
void spe_biz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x128, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
686
/** Branch indirect if non-zero word.  If rT.word[0]!=0, jump to rA. */
void spe_binz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x129, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
692
/** Branch indirect if zero halfword.  If rT.halfword[1]==0, jump to rA. */
void spe_bihz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x12a, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
698
/** Branch indirect if non-zero halfword.  If rT.halfword[1]!=0, jump to rA. */
void spe_bihnz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x12b, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}
704
705
706 /* Hint-for-branch instructions
707 */
708 #if 0
709 hbr;
710 hbra;
711 hbrr;
712 #endif
713
714
715 /* Control instructions
716 */
717 #if 0
718 stop;
719 EMIT_RR (spe_stopd, 0x140);
720 EMIT_ (spe_nop, 0x201);
721 sync;
722 EMIT_ (spe_dsync, 0x003);
723 EMIT_R (spe_mfspr, 0x00c);
724 EMIT_R (spe_mtspr, 0x10c);
725 #endif
726
727
728 /**
729 ** Helper / "macro" instructions.
730 ** Use somewhat verbose names as a reminder that these aren't native
731 ** SPE instructions.
732 **/
733
734
/**
 * Load a float constant into every word slot of register rT.
 * A few common constants are special-cased to a single instruction;
 * the general case loads the IEEE-754 bit pattern in two halves.
 */
void
spe_load_float(struct spe_function *p, unsigned rT, float x)
{
   if (x == 0.0f) {
      spe_il(p, rT, 0x0);
   }
   else if (x == 0.5f) {
      /* 0x3f000000 is IEEE-754 0.5; low halfword is zero */
      spe_ilhu(p, rT, 0x3f00);
   }
   else if (x == 1.0f) {
      /* 0x3f800000 is IEEE-754 1.0 */
      spe_ilhu(p, rT, 0x3f80);
   }
   else if (x == -1.0f) {
      /* 0xbf800000 is IEEE-754 -1.0 */
      spe_ilhu(p, rT, 0xbf80);
   }
   else {
      /* type-pun the float through a union, then load upper halfword
       * (ilhu) followed by or-in of the lower halfword (iohl) */
      union {
         float f;
         unsigned u;
      } bits;
      bits.f = x;
      spe_ilhu(p, rT, bits.u >> 16);
      spe_iohl(p, rT, bits.u & 0xffff);
   }
}
760
761
/** Load a signed integer constant into every word slot of register rT. */
void
spe_load_int(struct spe_function *p, unsigned rT, int i)
{
   if (i >= -32768 && i <= 32767) {
      /* fits in il's signed 16-bit immediate */
      spe_il(p, rT, i);
   }
   else {
      /* upper halfword first, then or-in the lower halfword if nonzero */
      spe_ilhu(p, rT, i >> 16);
      if (i & 0xffff)
         spe_iohl(p, rT, i & 0xffff);
   }
}
774
/** Load an unsigned integer constant into every word slot of register rT,
 *  using as few instructions as possible. */
void spe_load_uint(struct spe_function *p, unsigned rT, unsigned int ui)
{
   /* If the whole value is in the lower 18 bits, use ila, which
    * doesn't sign-extend.  Otherwise, if the two halfwords of
    * the constant are identical, use ilh.  Otherwise, if every byte of
    * the desired value is 0x00 or 0xff, we can use Form Select Mask for
    * Bytes Immediate (fsmbi) to load the value in a single instruction.
    * Otherwise, in the general case, we have to use ilhu followed by iohl.
    */
   if ((ui & 0x0003ffff) == ui) {
      /* fits in ila's 18-bit unsigned immediate */
      spe_ila(p, rT, ui);
   }
   else if ((ui >> 16) == (ui & 0xffff)) {
      /* both halfwords identical: one ilh does it */
      spe_ilh(p, rT, ui & 0xffff);
   }
   else if (
      ((ui & 0x000000ff) == 0 || (ui & 0x000000ff) == 0x000000ff) &&
      ((ui & 0x0000ff00) == 0 || (ui & 0x0000ff00) == 0x0000ff00) &&
      ((ui & 0x00ff0000) == 0 || (ui & 0x00ff0000) == 0x00ff0000) &&
      ((ui & 0xff000000) == 0 || (ui & 0xff000000) == 0xff000000)
   ) {
      /* every byte is 0x00 or 0xff: expressible as an fsmbi mask */
      unsigned int mask = 0;
      /* fsmbi duplicates each bit in the given mask eight times,
       * using a 16-bit value to initialize a 16-byte quadword.
       * Each 4-bit nybble of the mask corresponds to a full word
       * of the result; look at the value and figure out the mask
       * (replicated for each word in the quadword), and then
       * form the "select mask" to get the value.
       */
      if ((ui & 0x000000ff) == 0x000000ff) mask |= 0x1111;
      if ((ui & 0x0000ff00) == 0x0000ff00) mask |= 0x2222;
      if ((ui & 0x00ff0000) == 0x00ff0000) mask |= 0x4444;
      if ((ui & 0xff000000) == 0xff000000) mask |= 0x8888;
      spe_fsmbi(p, rT, mask);
   }
   else {
      /* The general case: this usually uses two instructions, but
       * may use only one if the low-order 16 bits of each word are 0.
       */
      spe_ilhu(p, rT, ui >> 16);
      if (ui & 0xffff)
         spe_iohl(p, rT, ui & 0xffff);
   }
}
819
/**
 * AND register rA with an immediate unsigned constant, result in rT.
 *
 * This function is constructed identically to spe_xor_uint() below.
 * Changes to one should be made in the other.
 */
void
spe_and_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If we can, emit a single instruction, either And Byte Immediate
    * (which uses the same constant across each byte), And Halfword Immediate
    * (which sign-extends a 10-bit immediate to 16 bits and uses that
    * across each halfword), or And Word Immediate (which sign-extends
    * a 10-bit immediate to 32 bits).
    *
    * Otherwise, we'll need to use a temporary register.
    */
   unsigned int tmp;

   /* If the upper 23 bits are all 0s or all 1s, sign extension
    * will work and we can use And Word Immediate
    */
   tmp = ui & 0xfffffe00;
   if (tmp == 0xfffffe00 || tmp == 0) {
      spe_andi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric along halfword boundaries and
    * the upper 7 bits of each halfword are all 0s or 1s, we
    * can use And Halfword Immediate
    */
   tmp = ui & 0xfe00fe00;
   if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
      spe_andhi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric in each byte, then we can use
    * the And Byte Immediate instruction.
    */
   tmp = ui & 0x000000ff;
   if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
      spe_andbi(p, rT, rA, tmp);
      return;
   }

   /* Otherwise, we'll have to use a temporary register:
    * load the constant, then AND register-to-register. */
   unsigned int tmp_reg = spe_allocate_available_register(p);
   spe_load_uint(p, tmp_reg, ui);
   spe_and(p, rT, rA, tmp_reg);
   spe_release_register(p, tmp_reg);
}
871
872
/**
 * XOR register rA with an immediate unsigned constant, result in rT.
 *
 * This function is constructed identically to spe_and_uint() above.
 * Changes to one should be made in the other.
 */
void
spe_xor_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If we can, emit a single instruction, either Exclusive Or Byte
    * Immediate (which uses the same constant across each byte), Exclusive
    * Or Halfword Immediate (which sign-extends a 10-bit immediate to
    * 16 bits and uses that across each halfword), or Exclusive Or Word
    * Immediate (which sign-extends a 10-bit immediate to 32 bits).
    *
    * Otherwise, we'll need to use a temporary register.
    */
   unsigned int tmp;

   /* If the upper 23 bits are all 0s or all 1s, sign extension
    * will work and we can use Exclusive Or Word Immediate
    */
   tmp = ui & 0xfffffe00;
   if (tmp == 0xfffffe00 || tmp == 0) {
      spe_xori(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric along halfword boundaries and
    * the upper 7 bits of each halfword are all 0s or 1s, we
    * can use Exclusive Or Halfword Immediate
    */
   tmp = ui & 0xfe00fe00;
   if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
      spe_xorhi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric in each byte, then we can use
    * the Exclusive Or Byte Immediate instruction.
    */
   tmp = ui & 0x000000ff;
   if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
      spe_xorbi(p, rT, rA, tmp);
      return;
   }

   /* Otherwise, we'll have to use a temporary register:
    * load the constant, then XOR register-to-register. */
   unsigned int tmp_reg = spe_allocate_available_register(p);
   spe_load_uint(p, tmp_reg, ui);
   spe_xor(p, rT, rA, tmp_reg);
   spe_release_register(p, tmp_reg);
}
924
/** Per-word compare-equal of rA against an unsigned constant, result in rT. */
void
spe_compare_equal_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   if (ui <= 0x1ff) {
      /* the value fits in Compare Equal Word Immediate's field */
      spe_ceqi(p, rT, rA, ui);
   }
   else {
      /* too big for the immediate form: load it into a temp first */
      unsigned int tmp_reg = spe_allocate_available_register(p);
      spe_load_uint(p, tmp_reg, ui);
      spe_ceq(p, rT, rA, tmp_reg);
      spe_release_register(p, tmp_reg);
   }
}
942
/** Per-word unsigned compare rA > ui, result in rT. */
void
spe_compare_greater_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   if (ui <= 0x3ff) {
      /* fits in Compare Logical Greater Than Word Immediate's field */
      spe_clgti(p, rT, rA, ui);
   }
   else {
      /* too big for the immediate form: load it into a temp first */
      unsigned int tmp_reg = spe_allocate_available_register(p);
      spe_load_uint(p, tmp_reg, ui);
      spe_clgt(p, rT, rA, tmp_reg);
      spe_release_register(p, tmp_reg);
   }
}
960
/** Replicate word 0 of rA into all four word slots of rT. */
void
spe_splat(struct spe_function *p, unsigned rT, unsigned rA)
{
   /* Build the shuffle pattern in a temp register, just in case rT == rA.
    * Pattern 0x00010203 selects bytes 0-3 (word 0) for every word slot.
    */
   unsigned int pattern = spe_allocate_available_register(p);
   spe_ila(p, pattern, 0x00010203);
   spe_shufb(p, rT, rA, rA, pattern);
   spe_release_register(p, pattern);
}
971
972
/** Bitwise complement: rT = ~rA, implemented as nor(rA, rA). */
void
spe_complement(struct spe_function *p, unsigned rT, unsigned rA)
{
   spe_nor(p, rT, rA, rA);
}
978
979
/** Register-to-register move: rT = rA. */
void
spe_move(struct spe_function *p, unsigned rT, unsigned rA)
{
   /* Use different instructions depending on the instruction address
    * to take advantage of the dual pipelines.
    */
   if (p->num_inst & 1)
      spe_shlqbyi(p, rT, rA, 0);    /* odd pipe */
   else
      spe_ori(p, rT, rA, 0);        /* even pipe */
}
991
992
/** Clear register rT by XORing it with itself. */
void
spe_zero(struct spe_function *p, unsigned rT)
{
   spe_xor(p, rT, rT, rT);
}
998
999
/**
 * Replicate the given word (0..3) of rA into all four word slots of rT.
 * Works even when rT == rA: the original code used rT as scratch for the
 * shuffle pattern before reading rA, clobbering the source on aliasing
 * (compare spe_splat() above, which allocates a temp "just in case").
 */
void
spe_splat_word(struct spe_function *p, unsigned rT, unsigned rA, int word)
{
   assert(word >= 0);
   assert(word <= 3);

   if (word == 0) {
      /* build the word-0 splat pattern (0x00010203 per word) in a temp
       * so rA is not clobbered when rT == rA */
      int pat = spe_allocate_available_register(p);
      spe_ila(p, pat, 66051);
      spe_shufb(p, rT, rA, rA, pat);
      spe_release_register(p, pat);
   }
   else {
      /* XXX review this, we may not need the rotqbyi instruction */
      int tmp2 = spe_allocate_available_register(p);

      /* rotate the desired word into slot 0 BEFORE touching rT, so an
       * aliased rT == rA source is read first */
      spe_rotqbyi(p, tmp2, rA, 4 * word);
      spe_ila(p, rT, 66051);
      spe_shufb(p, rT, tmp2, tmp2, rT);

      spe_release_register(p, tmp2);
   }
}
1023
/**
 * For each 32-bit float element of rA and rB, choose the smaller of the
 * two, compositing them into the rT register.
 *
 * The Float Compare Greater Than (fcgt) instruction will put 1s into
 * compare_reg where rA > rB, and 0s where rA <= rB.
 *
 * Then the Select Bits (selb) instruction will take bits from rA where
 * compare_reg is 0, and from rB where compare_reg is 1; i.e., from rA
 * where rA <= rB and from rB where rB > rA, which is exactly the
 * "min" operation.
 *
 * The compare_reg could in many cases be the same as rT, unless
 * rT == rA || rT == rB.  But since this is common in constructions
 * like "x = min(x, a)", we always allocate a new register to be safe.
 */
void
spe_float_min(struct spe_function *p, unsigned rT, unsigned rA, unsigned rB)
{
   unsigned int compare_reg = spe_allocate_available_register(p);
   spe_fcgt(p, compare_reg, rA, rB);
   spe_selb(p, rT, rA, rB, compare_reg);
   spe_release_register(p, compare_reg);
}
1048
/**
 * For each 32-bit float element of rA and rB, choose the greater of the
 * two, compositing them into the rT register.
 *
 * The logic is similar to that of spe_float_min() above; the only
 * difference is that the registers on spe_selb() have been reversed,
 * so that the larger of the two is selected instead of the smaller.
 */
void
spe_float_max(struct spe_function *p, unsigned rT, unsigned rA, unsigned rB)
{
   unsigned int compare_reg = spe_allocate_available_register(p);
   spe_fcgt(p, compare_reg, rA, rB);
   spe_selb(p, rT, rB, rA, compare_reg);
   spe_release_register(p, compare_reg);
}
1065
1066 #endif /* GALLIUM_CELL */