cell: additional 'offset' checking in spe_lqd(), spe_stqd()
[mesa.git] src/gallium/auxiliary/rtasm/rtasm_ppc_spe.c
/*
 * (C) Copyright IBM Corporation 2008
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 * Real-time assembly generation interface for Cell B.E. SPEs.
 *
 * \author Ian Romanick <idr@us.ibm.com>
 * \author Brian Paul
 */


#include <stdio.h>
#include "pipe/p_compiler.h"
#include "util/u_memory.h"
#include "rtasm_ppc_spe.h"


#ifdef GALLIUM_CELL
/**
 * SPE instruction types
 *
 * There are 6 primary instruction encodings used on the Cell's SPEs. Each of
 * the following unions encodes one type.
 *
 * \bug
 * If, at some point, we start generating SPE code from a little-endian host
 * these unions will not work.
 */
/*@{*/
/**
 * Encode one output register with two input registers
 */
union spe_inst_RR {
   uint32_t bits;
   struct {
      unsigned op:11;
      unsigned rB:7;
      unsigned rA:7;
      unsigned rT:7;
   } inst;
};


/**
 * Encode one output register with three input registers
 */
union spe_inst_RRR {
   uint32_t bits;
   struct {
      unsigned op:4;
      unsigned rT:7;
      unsigned rB:7;
      unsigned rA:7;
      unsigned rC:7;
   } inst;
};


/**
 * Encode one output register with one input reg. and a 7-bit signed immed
 */
union spe_inst_RI7 {
   uint32_t bits;
   struct {
      unsigned op:11;
      unsigned i7:7;
      unsigned rA:7;
      unsigned rT:7;
   } inst;
};


/**
 * Encode one output register with one input reg. and an 8-bit signed immed
 */
union spe_inst_RI8 {
   uint32_t bits;
   struct {
      unsigned op:10;
      unsigned i8:8;
      unsigned rA:7;
      unsigned rT:7;
   } inst;
};


/**
 * Encode one output register with one input reg. and a 10-bit signed immed
 */
union spe_inst_RI10 {
   uint32_t bits;
   struct {
      unsigned op:8;
      unsigned i10:10;
      unsigned rA:7;
      unsigned rT:7;
   } inst;
};


/**
 * Encode one output register with a 16-bit signed immediate
 */
union spe_inst_RI16 {
   uint32_t bits;
   struct {
      unsigned op:9;
      unsigned i16:16;
      unsigned rT:7;
   } inst;
};


/**
 * Encode one output register with an 18-bit signed immediate
 */
union spe_inst_RI18 {
   uint32_t bits;
   struct {
      unsigned op:7;
      unsigned i18:18;
      unsigned rT:7;
   } inst;
};
/*@}*/
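
/*
 * Illustrative encoding example (assuming the big-endian bit-field layout
 * these unions rely on; see the \bug note above): the RI10-form instruction
 * "lqd $3, 16($1)" packs as op = 0x034, i10 = 16 >> 4 = 1, rA = 1, rT = 3,
 * i.e.
 *
 *    bits = (0x034 << 24) | (1 << 14) | (1 << 7) | 3  ==  0x34004083
 *
 * which is the word emit_RI10() below produces for spe_lqd().
 */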


static void
indent(const struct spe_function *p)
{
   int i;
   for (i = 0; i < p->indent; i++) {
      putchar(' ');
   }
}


static const char *
rem_prefix(const char *longname)
{
   return longname + 4;
}


static const char *
reg_name(int reg)
{
   switch (reg) {
   case SPE_REG_SP:
      return "$sp";
   case SPE_REG_RA:
      return "$lr";
   default:
      {
         /* cycle through four buffers to handle multiple calls per printf */
         static char buf[4][10];
         static int b = 0;
         b = (b + 1) % 4;
         sprintf(buf[b], "$%d", reg);
         return buf[b];
      }
   }
}


static void emit_RR(struct spe_function *p, unsigned op, unsigned rT,
                    unsigned rA, unsigned rB, const char *name)
{
   union spe_inst_RR inst;
   inst.inst.op = op;
   inst.inst.rB = rB;
   inst.inst.rA = rA;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, %s, %s\n",
             rem_prefix(name), reg_name(rT), reg_name(rA), reg_name(rB));
   }
}


static void emit_RRR(struct spe_function *p, unsigned op, unsigned rT,
                     unsigned rA, unsigned rB, unsigned rC, const char *name)
{
   union spe_inst_RRR inst;
   inst.inst.op = op;
   inst.inst.rT = rT;
   inst.inst.rB = rB;
   inst.inst.rA = rA;
   inst.inst.rC = rC;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, %s, %s, %s\n", rem_prefix(name), reg_name(rT),
             reg_name(rA), reg_name(rB), reg_name(rC));
   }
}


static void emit_RI7(struct spe_function *p, unsigned op, unsigned rT,
                     unsigned rA, int imm, const char *name)
{
   union spe_inst_RI7 inst;
   inst.inst.op = op;
   inst.inst.i7 = imm;
   inst.inst.rA = rA;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, %s, 0x%x\n",
             rem_prefix(name), reg_name(rT), reg_name(rA), imm);
   }
}



static void emit_RI8(struct spe_function *p, unsigned op, unsigned rT,
                     unsigned rA, int imm, const char *name)
{
   union spe_inst_RI8 inst;
   inst.inst.op = op;
   inst.inst.i8 = imm;
   inst.inst.rA = rA;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, %s, 0x%x\n",
             rem_prefix(name), reg_name(rT), reg_name(rA), imm);
   }
}



static void emit_RI10(struct spe_function *p, unsigned op, unsigned rT,
                      unsigned rA, int imm, const char *name)
{
   union spe_inst_RI10 inst;
   inst.inst.op = op;
   inst.inst.i10 = imm;
   inst.inst.rA = rA;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, %s, 0x%x\n",
             rem_prefix(name), reg_name(rT), reg_name(rA), imm);
   }
}


static void emit_RI16(struct spe_function *p, unsigned op, unsigned rT,
                      int imm, const char *name)
{
   union spe_inst_RI16 inst;
   inst.inst.op = op;
   inst.inst.i16 = imm;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
   }
}


static void emit_RI18(struct spe_function *p, unsigned op, unsigned rT,
                      int imm, const char *name)
{
   union spe_inst_RI18 inst;
   inst.inst.op = op;
   inst.inst.i18 = imm;
   inst.inst.rT = rT;
   p->store[p->num_inst++] = inst.bits;
   assert(p->num_inst <= p->max_inst);
   if (p->print) {
      indent(p);
      printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
   }
}




#define EMIT_(_name, _op) \
void _name (struct spe_function *p, unsigned rT) \
{ \
   emit_RR(p, _op, rT, 0, 0, __FUNCTION__); \
}

#define EMIT_R(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA) \
{ \
   emit_RR(p, _op, rT, rA, 0, __FUNCTION__); \
}

#define EMIT_RR(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, unsigned rB) \
{ \
   emit_RR(p, _op, rT, rA, rB, __FUNCTION__); \
}

#define EMIT_RRR(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, unsigned rB, unsigned rC) \
{ \
   emit_RRR(p, _op, rT, rA, rB, rC, __FUNCTION__); \
}

#define EMIT_RI7(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI7(p, _op, rT, rA, imm, __FUNCTION__); \
}

#define EMIT_RI8(_name, _op, bias) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI8(p, _op, rT, rA, bias - imm, __FUNCTION__); \
}

#define EMIT_RI10(_name, _op) \
void _name (struct spe_function *p, unsigned rT, unsigned rA, int imm) \
{ \
   emit_RI10(p, _op, rT, rA, imm, __FUNCTION__); \
}

#define EMIT_RI16(_name, _op) \
void _name (struct spe_function *p, unsigned rT, int imm) \
{ \
   emit_RI16(p, _op, rT, imm, __FUNCTION__); \
}

#define EMIT_RI18(_name, _op) \
void _name (struct spe_function *p, unsigned rT, int imm) \
{ \
   emit_RI18(p, _op, rT, imm, __FUNCTION__); \
}

#define EMIT_I16(_name, _op) \
void _name (struct spe_function *p, int imm) \
{ \
   emit_RI16(p, _op, 0, imm, __FUNCTION__); \
}

#include "rtasm_ppc_spe.h"
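
/*
 * Note: this second inclusion of rtasm_ppc_spe.h is deliberate.  With the
 * EMIT_* macros defined above, the per-instruction declarations in the
 * header expand into the corresponding one-line emitter definitions
 * (spe_il(), spe_ilhu(), spe_fcgt(), etc.) used later in this file, so each
 * SPE opcode only has to be listed once, in the header.
 */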



/**
 * Initialize an spe_function.
 * \param code_size size of instruction buffer to allocate, in bytes.
 */
void spe_init_func(struct spe_function *p, unsigned code_size)
{
   unsigned int i;

   p->store = align_malloc(code_size, 16);
   p->num_inst = 0;
   p->max_inst = code_size / SPE_INST_SIZE;

   p->set_count = 0;
   memset(p->regs, 0, SPE_NUM_REGS * sizeof(p->regs[0]));

   /* Conservatively treat R0 - R2 and R80 - R127 as non-volatile.
    */
   p->regs[0] = p->regs[1] = p->regs[2] = 1;
   for (i = 80; i <= 127; i++) {
      p->regs[i] = 1;
   }

   p->print = false;
   p->indent = 0;
}


void spe_release_func(struct spe_function *p)
{
   assert(p->num_inst <= p->max_inst);
   if (p->store != NULL) {
      align_free(p->store);
   }
   p->store = NULL;
}


/** Return current code size in bytes. */
unsigned spe_code_size(const struct spe_function *p)
{
   return p->num_inst * SPE_INST_SIZE;
}


/**
 * Allocate a SPE register.
 * \return register index or -1 if none left.
 */
int spe_allocate_available_register(struct spe_function *p)
{
   unsigned i;
   for (i = 0; i < SPE_NUM_REGS; i++) {
      if (p->regs[i] == 0) {
         p->regs[i] = 1;
         return i;
      }
   }

   return -1;
}


/**
 * Mark the given SPE register as "allocated".
 */
int spe_allocate_register(struct spe_function *p, int reg)
{
   assert(reg < SPE_NUM_REGS);
   assert(p->regs[reg] == 0);
   p->regs[reg] = 1;
   return reg;
}


/**
 * Mark the given SPE register as "unallocated". Note that this should
 * only be used on registers allocated in the current register set; an
 * assertion will fail if an attempt is made to deallocate a register
 * allocated in an earlier register set.
 */
void spe_release_register(struct spe_function *p, int reg)
{
   assert(reg < SPE_NUM_REGS);
   assert(p->regs[reg] == 1);

   p->regs[reg] = 0;
}

/**
 * Start a new set of registers. This can be called if
 * it will be difficult later to determine exactly what
 * registers were actually allocated during a code generation
 * sequence, and you really just want to deallocate all of them.
 */
void spe_allocate_register_set(struct spe_function *p)
{
   unsigned int i;

   /* Keep track of the set count. If it ever wraps around to 0,
    * we're in trouble.
    */
   p->set_count++;
   assert(p->set_count > 0);

   /* Increment the allocation count of all registers currently
    * allocated. Then any registers that are allocated in this set
    * will be the only ones with a count of 1; they'll all be released
    * when the register set is released.
    */
   for (i = 0; i < SPE_NUM_REGS; i++) {
      if (p->regs[i] > 0)
         p->regs[i]++;
   }
}

void spe_release_register_set(struct spe_function *p)
{
   unsigned int i;

   /* If the set count drops below zero, we're in trouble. */
   assert(p->set_count > 0);
   p->set_count--;

   /* Drop the allocation level of all registers. Any allocated
    * during this register set will drop to 0 and then become
    * available.
    */
   for (i = 0; i < SPE_NUM_REGS; i++) {
      if (p->regs[i] > 0)
         p->regs[i]--;
   }
}
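
/*
 * Example of the counting scheme above: if $75 is allocated before
 * spe_allocate_register_set() is called, its count goes 1 -> 2 when the set
 * is opened and back to 1 when the set is released, so it stays allocated.
 * A register allocated inside the set starts at 1 and drops to 0 on release,
 * making it available again.
 */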


unsigned
spe_get_registers_used(const struct spe_function *p, ubyte used[])
{
   unsigned i, num = 0;
   /* only count registers in the range available to callers */
   for (i = 2; i < 80; i++) {
      if (p->regs[i]) {
         used[num++] = i;
      }
   }
   return num;
}


void
spe_print_code(struct spe_function *p, boolean enable)
{
   p->print = enable;
}


void
spe_indent(struct spe_function *p, int spaces)
{
   p->indent += spaces;
}


void
spe_comment(struct spe_function *p, int rel_indent, const char *s)
{
   if (p->print) {
      p->indent += rel_indent;
      indent(p);
      p->indent -= rel_indent;
      printf("# %s\n", s);
   }
}


/**
 * Load quad word.
 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
 */
void spe_lqd(struct spe_function *p, unsigned rT, unsigned rA, int offset)
{
   const boolean pSave = p->print;

   /* offset must be a multiple of 16 */
   assert(offset % 16 == 0);
   /* offset must fit in 10-bit signed int field, after shifting */
   assert((offset >> 4) <= 511);
   assert((offset >> 4) >= -512);

   p->print = FALSE;
   emit_RI10(p, 0x034, rT, rA, offset >> 4, "spe_lqd");
   p->print = pSave;

   if (p->print) {
      indent(p);
      printf("lqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
   }
}


/**
 * Store quad word.
 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
 */
void spe_stqd(struct spe_function *p, unsigned rT, unsigned rA, int offset)
{
   const boolean pSave = p->print;

   /* offset must be a multiple of 16 */
   assert(offset % 16 == 0);
   /* offset must fit in 10-bit signed int field, after shifting */
   assert((offset >> 4) <= 511);
   assert((offset >> 4) >= -512);

   p->print = FALSE;
   emit_RI10(p, 0x024, rT, rA, offset >> 4, "spe_stqd");
   p->print = pSave;

   if (p->print) {
      indent(p);
      printf("stqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
   }
}
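
/*
 * For both spe_lqd() and spe_stqd() above, the 10-bit immediate holds a
 * quadword index (offset >> 4), so legal byte offsets are multiples of 16
 * in the range [-8192, 8176].  For example, spe_stqd(p, reg, SPE_REG_SP, 32)
 * is fine, while an offset of 24 or 8192 would trip the assertions.
 */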


/**
 * For branch instructions:
 * \param d if 1, disable interrupts if the branch is taken
 * \param e if 1, enable interrupts if the branch is taken
 * If d and e are both zero, the interrupt status is left unchanged.
 */

/** Branch Indirect to address in rA */
void spe_bi(struct spe_function *p, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x1a8, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Interrupt Return */
void spe_iret(struct spe_function *p, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x1aa, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect and set link on external data */
void spe_bisled(struct spe_function *p, unsigned rT, unsigned rA, int d,
                int e)
{
   emit_RI7(p, 0x1ab, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect and set link. Save PC in rT, jump to rA. */
void spe_bisl(struct spe_function *p, unsigned rT, unsigned rA, int d,
              int e)
{
   emit_RI7(p, 0x1a9, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect if zero word. If rT.word[0]==0, jump to rA. */
void spe_biz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x128, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect if non-zero word. If rT.word[0]!=0, jump to rA. */
void spe_binz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x129, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect if zero halfword. If rT.halfword[1]==0, jump to rA. */
void spe_bihz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x12a, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}

/** Branch indirect if non-zero halfword. If rT.halfword[1]!=0, jump to rA. */
void spe_bihnz(struct spe_function *p, unsigned rT, unsigned rA, int d, int e)
{
   emit_RI7(p, 0x12b, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
}


/* Hint-for-branch instructions
 */
#if 0
hbr;
hbra;
hbrr;
#endif


/* Control instructions
 */
#if 0
stop;
EMIT_RR (spe_stopd, 0x140);
EMIT_   (spe_lnop,  0x001);
EMIT_   (spe_nop,   0x201);
sync;
EMIT_   (spe_dsync, 0x003);
EMIT_R  (spe_mfspr, 0x00c);
EMIT_R  (spe_mtspr, 0x10c);
#endif


/**
 ** Helper / "macro" instructions.
 ** Use somewhat verbose names as a reminder that these aren't native
 ** SPE instructions.
 **/


void
spe_load_float(struct spe_function *p, unsigned rT, float x)
{
   if (x == 0.0f) {
      spe_il(p, rT, 0x0);
   }
   else if (x == 0.5f) {
      spe_ilhu(p, rT, 0x3f00);
   }
   else if (x == 1.0f) {
      spe_ilhu(p, rT, 0x3f80);
   }
   else if (x == -1.0f) {
      spe_ilhu(p, rT, 0xbf80);
   }
   else {
      union {
         float f;
         unsigned u;
      } bits;
      bits.f = x;
      spe_ilhu(p, rT, bits.u >> 16);
      spe_iohl(p, rT, bits.u & 0xffff);
   }
}
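
/*
 * The special cases above rely on single-precision bit patterns whose low
 * halfword is zero: 0.5f is 0x3f000000, 1.0f is 0x3f800000 and -1.0f is
 * 0xbf800000, so a single ilhu (immediate load halfword upper) suffices.
 * Any other constant takes the two-instruction ilhu/iohl sequence.
 */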


void
spe_load_int(struct spe_function *p, unsigned rT, int i)
{
   if (-32768 <= i && i <= 32767) {
      spe_il(p, rT, i);
   }
   else {
      spe_ilhu(p, rT, i >> 16);
      if (i & 0xffff)
         spe_iohl(p, rT, i & 0xffff);
   }
}

void spe_load_uint(struct spe_function *p, unsigned rT, unsigned int ui)
{
   /* If the whole value is in the lower 18 bits, use ila, which
    * doesn't sign-extend. Otherwise, if the two halfwords of
    * the constant are identical, use ilh. Otherwise, if every byte of
    * the desired value is 0x00 or 0xff, we can use Form Select Mask for
    * Bytes Immediate (fsmbi) to load the value in a single instruction.
    * Otherwise, in the general case, we have to use ilhu followed by iohl.
    */
   if ((ui & 0x0003ffff) == ui) {
      spe_ila(p, rT, ui);
   }
   else if ((ui >> 16) == (ui & 0xffff)) {
      spe_ilh(p, rT, ui & 0xffff);
   }
   else if (
      ((ui & 0x000000ff) == 0 || (ui & 0x000000ff) == 0x000000ff) &&
      ((ui & 0x0000ff00) == 0 || (ui & 0x0000ff00) == 0x0000ff00) &&
      ((ui & 0x00ff0000) == 0 || (ui & 0x00ff0000) == 0x00ff0000) &&
      ((ui & 0xff000000) == 0 || (ui & 0xff000000) == 0xff000000)
   ) {
      unsigned int mask = 0;
      /* fsmbi duplicates each bit in the given mask eight times,
       * using a 16-bit value to initialize a 16-byte quadword.
       * Each 4-bit nybble of the mask corresponds to a full word
       * of the result; look at the value and figure out the mask
       * (replicated for each word in the quadword), and then
       * form the "select mask" to get the value.
       */
      if ((ui & 0x000000ff) == 0x000000ff) mask |= 0x1111;
      if ((ui & 0x0000ff00) == 0x0000ff00) mask |= 0x2222;
      if ((ui & 0x00ff0000) == 0x00ff0000) mask |= 0x4444;
      if ((ui & 0xff000000) == 0xff000000) mask |= 0x8888;
      spe_fsmbi(p, rT, mask);
   }
   else {
      /* The general case: this usually uses two instructions, but
       * may use only one if the low-order 16 bits of each word are 0.
       */
      spe_ilhu(p, rT, ui >> 16);
      if (ui & 0xffff)
         spe_iohl(p, rT, ui & 0xffff);
   }
}
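
/*
 * Worked fsmbi example for the path above: ui = 0x00ff00ff sets mask bits
 * 0x1111 (low byte) and 0x4444 (third byte), giving mask = 0x5555. fsmbi
 * expands each of those 16 mask bits to a full byte (0x00 or 0xff), so
 * every word of the quadword becomes 0x00ff00ff -- the desired constant in
 * a single instruction.
 */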

/**
 * This function is constructed identically to spe_xor_uint() below.
 * Changes to one should be made in the other.
 */
void
spe_and_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If we can, emit a single instruction, either And Byte Immediate
    * (which uses the same constant across each byte), And Halfword Immediate
    * (which sign-extends a 10-bit immediate to 16 bits and uses that
    * across each halfword), or And Word Immediate (which sign-extends
    * a 10-bit immediate to 32 bits).
    *
    * Otherwise, we'll need to use a temporary register.
    */
   unsigned int tmp;

   /* If the upper 23 bits are all 0s or all 1s, sign extension
    * will work and we can use And Word Immediate
    */
   tmp = ui & 0xfffffe00;
   if (tmp == 0xfffffe00 || tmp == 0) {
      spe_andi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric along halfword boundaries and
    * the upper 7 bits of each halfword are all 0s or 1s, we
    * can use And Halfword Immediate
    */
   tmp = ui & 0xfe00fe00;
   if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
      spe_andhi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric in each byte, then we can use
    * the And Byte Immediate instruction.
    */
   tmp = ui & 0x000000ff;
   if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
      spe_andbi(p, rT, rA, tmp);
      return;
   }

   /* Otherwise, we'll have to use a temporary register. */
   unsigned int tmp_reg = spe_allocate_available_register(p);
   spe_load_uint(p, tmp_reg, ui);
   spe_and(p, rT, rA, tmp_reg);
   spe_release_register(p, tmp_reg);
}
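
/*
 * Example for the And Word Immediate path above: ui = 0xfffffff0 has all of
 * its upper 23 bits set, so the 10-bit immediate 0x3f0 sign-extends back to
 * 0xfffffff0 and a single andi does the job.  A value like 0x12345678 fails
 * all three immediate checks and falls through to the temporary-register
 * path.
 */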


/**
 * This function is constructed identically to spe_and_uint() above.
 * Changes to one should be made in the other.
 */
void
spe_xor_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If we can, emit a single instruction, either Exclusive Or Byte
    * Immediate (which uses the same constant across each byte), Exclusive
    * Or Halfword Immediate (which sign-extends a 10-bit immediate to
    * 16 bits and uses that across each halfword), or Exclusive Or Word
    * Immediate (which sign-extends a 10-bit immediate to 32 bits).
    *
    * Otherwise, we'll need to use a temporary register.
    */
   unsigned int tmp;

   /* If the upper 23 bits are all 0s or all 1s, sign extension
    * will work and we can use Exclusive Or Word Immediate
    */
   tmp = ui & 0xfffffe00;
   if (tmp == 0xfffffe00 || tmp == 0) {
      spe_xori(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric along halfword boundaries and
    * the upper 7 bits of each halfword are all 0s or 1s, we
    * can use Exclusive Or Halfword Immediate
    */
   tmp = ui & 0xfe00fe00;
   if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
      spe_xorhi(p, rT, rA, ui & 0x000003ff);
      return;
   }

   /* If the ui field is symmetric in each byte, then we can use
    * the Exclusive Or Byte Immediate instruction.
    */
   tmp = ui & 0x000000ff;
   if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
      spe_xorbi(p, rT, rA, tmp);
      return;
   }

   /* Otherwise, we'll have to use a temporary register. */
   unsigned int tmp_reg = spe_allocate_available_register(p);
   spe_load_uint(p, tmp_reg, ui);
   spe_xor(p, rT, rA, tmp_reg);
   spe_release_register(p, tmp_reg);
}

void
spe_compare_equal_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If the comparison value is 9 bits or less, it fits inside a
    * Compare Equal Word Immediate instruction.
    */
   if ((ui & 0x000001ff) == ui) {
      spe_ceqi(p, rT, rA, ui);
   }
   /* Otherwise, we're going to have to load a word first. */
   else {
      unsigned int tmp_reg = spe_allocate_available_register(p);
      spe_load_uint(p, tmp_reg, ui);
      spe_ceq(p, rT, rA, tmp_reg);
      spe_release_register(p, tmp_reg);
   }
}

void
spe_compare_greater_uint(struct spe_function *p, unsigned rT, unsigned rA, unsigned int ui)
{
   /* If the comparison value is 9 bits or less it fits in the (sign-extended)
    * 10-bit immediate of a Compare Logical Greater Than Word Immediate
    * instruction.
    */
   if ((ui & 0x000001ff) == ui) {
      spe_clgti(p, rT, rA, ui);
   }
   /* Otherwise, we're going to have to load a word first. */
   else {
      unsigned int tmp_reg = spe_allocate_available_register(p);
      spe_load_uint(p, tmp_reg, ui);
      spe_clgt(p, rT, rA, tmp_reg);
      spe_release_register(p, tmp_reg);
   }
}

void
spe_splat(struct spe_function *p, unsigned rT, unsigned rA)
{
   /* Duplicate bytes 0, 1, 2, and 3 across the whole register */
   spe_ila(p, rT, 0x00010203);
   spe_shufb(p, rT, rA, rA, rT);
}


void
spe_complement(struct spe_function *p, unsigned rT, unsigned rA)
{
   spe_nor(p, rT, rA, rA);
}


void
spe_move(struct spe_function *p, unsigned rT, unsigned rA)
{
   /* Use different instructions depending on the instruction address
    * to take advantage of the dual pipelines.
    */
   if (p->num_inst & 1)
      spe_shlqbyi(p, rT, rA, 0);  /* odd pipe */
   else
      spe_ori(p, rT, rA, 0);      /* even pipe */
}


void
spe_zero(struct spe_function *p, unsigned rT)
{
   spe_xor(p, rT, rT, rT);
}


void
spe_splat_word(struct spe_function *p, unsigned rT, unsigned rA, int word)
{
   assert(word >= 0);
   assert(word <= 3);

   if (word == 0) {
      int tmp1 = rT;
      spe_ila(p, tmp1, 66051);
      spe_shufb(p, rT, rA, rA, tmp1);
   }
   else {
      /* XXX review this, we may not need the rotqbyi instruction */
      int tmp1 = rT;
      int tmp2 = spe_allocate_available_register(p);

      spe_ila(p, tmp1, 66051);
      spe_rotqbyi(p, tmp2, rA, 4 * word);
      spe_shufb(p, rT, tmp2, tmp2, tmp1);

      spe_release_register(p, tmp2);
   }
}
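
/*
 * In spe_splat() and spe_splat_word() above, the shuffle control constant
 * 0x00010203 (written as decimal 66051 in spe_splat_word()) is replicated by
 * ila into every word of the control register, so each word of the shufb
 * pattern selects bytes 0-3 of the (possibly rotated) source, broadcasting
 * the chosen 32-bit word across the whole quadword.
 */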

/**
 * For each 32-bit float element of rA and rB, choose the smaller of the
 * two, compositing them into the rT register.
 *
 * The Float Compare Greater Than (fcgt) instruction will put 1s into
 * compare_reg where rA > rB, and 0s where rA <= rB.
 *
 * Then the Select Bits (selb) instruction will take bits from rA where
 * compare_reg is 0, and from rB where compare_reg is 1; i.e., from rA
 * where rA <= rB and from rB where rA > rB, which is exactly the
 * "min" operation.
 *
 * The compare_reg could in many cases be the same as rT, unless
 * rT == rA || rT == rB. But since this is common in constructions
 * like "x = min(x, a)", we always allocate a new register to be safe.
 */
void
spe_float_min(struct spe_function *p, unsigned rT, unsigned rA, unsigned rB)
{
   unsigned int compare_reg = spe_allocate_available_register(p);
   spe_fcgt(p, compare_reg, rA, rB);
   spe_selb(p, rT, rA, rB, compare_reg);
   spe_release_register(p, compare_reg);
}

/**
 * For each 32-bit float element of rA and rB, choose the greater of the
 * two, compositing them into the rT register.
 *
 * The logic is similar to that of spe_float_min() above; the only
 * difference is that the registers on spe_selb() have been reversed,
 * so that the larger of the two is selected instead of the smaller.
 */
void
spe_float_max(struct spe_function *p, unsigned rT, unsigned rA, unsigned rB)
{
   unsigned int compare_reg = spe_allocate_available_register(p);
   spe_fcgt(p, compare_reg, rA, rB);
   spe_selb(p, rT, rB, rA, compare_reg);
   spe_release_register(p, compare_reg);
}

#endif /* GALLIUM_CELL */