dd574ac02a65c6a54c04fbcc1e4e978f2cbf746b
[mesa.git] / src / gallium / auxiliary / tgsi / tgsi_ppc.c
1 /**************************************************************************
2 *
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * TGSI to PowerPC code generation.
30 */
31
32 #include "pipe/p_config.h"
33
34 #if defined(PIPE_ARCH_PPC)
35
36 #include "pipe/p_debug.h"
37 #include "pipe/p_shader_tokens.h"
38 #include "util/u_math.h"
39 #include "util/u_memory.h"
40 #include "util/u_sse.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_util.h"
43 #include "tgsi_dump.h"
44 #include "tgsi_exec.h"
45 #include "tgsi_ppc.h"
46 #include "rtasm/rtasm_ppc.h"
47
48
49 /**
50 * Since it's pretty much impossible to form PPC vector immediates, load
51 * them from memory here:
52 */
53 const float ppc_builtin_constants[] ALIGN16_ATTRIB = {
54 1.0f, -128.0f, 128.0, 0.0
55 };
56
57
/* Iterate CHAN over all NUM_CHANNELS channels. */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* Test whether dest register 0's writemask enables channel CHAN. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))

/* Execute the following statement only if channel CHAN is write-enabled. */
#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate CHAN over only the channels enabled in dest register 0's writemask. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
   IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* TGSI channel indexes */
#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3
76
77 /**
78 * How many TGSI temps should be implemented with real PPC vector registers
79 * rather than memory.
80 */
81 #define MAX_PPC_TEMPS 4
82
83
/**
 * A (TGSI src register, channel, PPC vector register) triple.
 * NOTE(review): appears unused within this file -- struct gen_context
 * declares an equivalent anonymous struct for its src cache; confirm no
 * external users before removing.
 */
struct reg_chan_vec
{
   struct tgsi_full_src_register src;  /* the TGSI source register */
   uint chan;                          /* channel index (0..3) */
   uint vec;                           /* PPC vector register number */
};
90
91
92 /**
93 * Context/state used during code gen.
94 */
95 struct gen_context
96 {
97 struct ppc_function *f;
98 int inputs_reg; /**< GP register pointing to input params */
99 int outputs_reg; /**< GP register pointing to output params */
100 int temps_reg; /**< GP register pointing to temporary "registers" */
101 int immed_reg; /**< GP register pointing to immediates buffer */
102 int const_reg; /**< GP register pointing to constants buffer */
103 int builtins_reg; /**< GP register pointint to built-in constants */
104
105 int offset_reg; /**< used to reduce redundant li instructions */
106 int offset_value;
107
108 int one_vec; /**< vector register with {1.0, 1.0, 1.0, 1.0} */
109 int bit31_vec; /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */
110
111 /**
112 * Map TGSI temps to PPC vector temps.
113 * We have 32 PPC vector regs. Use 16 of them for storing 4 TGSI temps.
114 * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
115 */
116 int temps_map[MAX_PPC_TEMPS][4];
117
118 /**
119 * Cache of src registers.
120 * This is used to avoid redundant load instructions.
121 */
122 struct {
123 struct tgsi_full_src_register src;
124 uint chan;
125 uint vec;
126 } regs[12]; /* 3 src regs, 4 channels */
127 uint num_regs;
128 };
129
130
131 /**
132 * Initialize code generation context.
133 */
134 static void
135 init_gen_context(struct gen_context *gen, struct ppc_function *func)
136 {
137 uint i;
138
139 memset(gen, 0, sizeof(*gen));
140 gen->f = func;
141 gen->inputs_reg = ppc_reserve_register(func, 3); /* first function param */
142 gen->outputs_reg = ppc_reserve_register(func, 4); /* second function param */
143 gen->temps_reg = ppc_reserve_register(func, 5); /* ... */
144 gen->immed_reg = ppc_reserve_register(func, 6);
145 gen->const_reg = ppc_reserve_register(func, 7);
146 gen->builtins_reg = ppc_reserve_register(func, 8);
147 gen->one_vec = -1;
148 gen->bit31_vec = -1;
149 gen->offset_reg = -1;
150 gen->offset_value = -9999999;
151 for (i = 0; i < MAX_PPC_TEMPS; i++) {
152 gen->temps_map[i][0] = ppc_allocate_vec_register(gen->f);
153 gen->temps_map[i][1] = ppc_allocate_vec_register(gen->f);
154 gen->temps_map[i][2] = ppc_allocate_vec_register(gen->f);
155 gen->temps_map[i][3] = ppc_allocate_vec_register(gen->f);
156 }
157 }
158
159
160 /**
161 * All PPC vector load/store instructions form an effective address
162 * by adding the contents of two registers. For example:
163 * lvx v2,r8,r9 # v2 = memory[r8 + r9]
164 * stvx v2,r8,r9 # memory[r8 + r9] = v2;
165 * So our lvx/stvx instructions are typically preceded by an 'li' instruction
166 * to load r9 (above) with an immediate (an offset).
167 * This code emits that 'li' instruction, but only if the offset value is
168 * different than the previous 'li'.
169 * This optimization seems to save about 10% in the instruction count.
170 * Note that we need to unconditionally emit an 'li' inside basic blocks
171 * (such as inside loops).
172 */
173 static int
174 emit_li_offset(struct gen_context *gen, int offset)
175 {
176 if (gen->offset_reg <= 0) {
177 /* allocate a GP register for storing load/store offset */
178 gen->offset_reg = ppc_allocate_register(gen->f);
179 }
180
181 /* emit new 'li' if offset is changing */
182 if (gen->offset_value < 0 || gen->offset_value != offset) {
183 gen->offset_value = offset;
184 ppc_li(gen->f, gen->offset_reg, offset);
185 }
186
187 return gen->offset_reg;
188 }
189
190
191 /**
192 * Forces subsequent emit_li_offset() calls to emit an 'li'.
193 * To be called at the top of basic blocks.
194 */
static void
reset_li_offset(struct gen_context *gen)
{
   /* sentinel value that can never equal a real (non-negative) offset */
   gen->offset_value = -9999999;
}
200
201
202
203 /**
204 * Load the given vector register with {value, value, value, value}.
205 * The value must be in the ppu_builtin_constants[] array.
206 * We wouldn't need this if there was a simple way to load PPC vector
207 * registers with immediate values!
208 */
209 static void
210 load_constant_vec(struct gen_context *gen, int dst_vec, float value)
211 {
212 uint pos;
213 for (pos = 0; pos < Elements(ppc_builtin_constants); pos++) {
214 if (ppc_builtin_constants[pos] == value) {
215 int offset = pos * 4;
216 int offset_reg = emit_li_offset(gen, offset);
217
218 /* Load 4-byte word into vector register.
219 * The vector slot depends on the effective address we load from.
220 * We know that our builtins start at a 16-byte boundary so we
221 * know that 'swizzle' tells us which vector slot will have the
222 * loaded word. The other vector slots will be undefined.
223 */
224 ppc_lvewx(gen->f, dst_vec, gen->builtins_reg, offset_reg);
225 /* splat word[pos % 4] across the vector reg */
226 ppc_vspltw(gen->f, dst_vec, dst_vec, pos % 4);
227 return;
228 }
229 }
230 assert(0 && "Need to add new constant to ppc_builtin_constants array");
231 }
232
233
234 /**
235 * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
236 */
237 static int
238 gen_one_vec(struct gen_context *gen)
239 {
240 if (gen->one_vec < 0) {
241 gen->one_vec = ppc_allocate_vec_register(gen->f);
242 load_constant_vec(gen, gen->one_vec, 1.0f);
243 }
244 return gen->one_vec;
245 }
246
247 /**
248 * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}.
249 */
250 static int
251 gen_get_bit31_vec(struct gen_context *gen)
252 {
253 if (gen->bit31_vec < 0) {
254 gen->bit31_vec = ppc_allocate_vec_register(gen->f);
255 ppc_vspltisw(gen->f, gen->bit31_vec, -1);
256 ppc_vslw(gen->f, gen->bit31_vec, gen->bit31_vec, gen->bit31_vec);
257 }
258 return gen->bit31_vec;
259 }
260
261
262 /**
263 * Register fetch. Return PPC vector register with result.
264 */
265 static int
266 emit_fetch(struct gen_context *gen,
267 const struct tgsi_full_src_register *reg,
268 const unsigned chan_index)
269 {
270 uint swizzle = tgsi_util_get_full_src_register_extswizzle(reg, chan_index);
271 int dst_vec = -1;
272
273 switch (swizzle) {
274 case TGSI_EXTSWIZZLE_X:
275 case TGSI_EXTSWIZZLE_Y:
276 case TGSI_EXTSWIZZLE_Z:
277 case TGSI_EXTSWIZZLE_W:
278 switch (reg->SrcRegister.File) {
279 case TGSI_FILE_INPUT:
280 {
281 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
282 int offset_reg = emit_li_offset(gen, offset);
283 dst_vec = ppc_allocate_vec_register(gen->f);
284 ppc_lvx(gen->f, dst_vec, gen->inputs_reg, offset_reg);
285 }
286 break;
287 case TGSI_FILE_TEMPORARY:
288 if (reg->SrcRegister.Index < MAX_PPC_TEMPS) {
289 /* use PPC vec register */
290 dst_vec = gen->temps_map[reg->SrcRegister.Index][swizzle];
291 }
292 else {
293 /* use memory-based temp register "file" */
294 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
295 int offset_reg = emit_li_offset(gen, offset);
296 dst_vec = ppc_allocate_vec_register(gen->f);
297 ppc_lvx(gen->f, dst_vec, gen->temps_reg, offset_reg);
298 }
299 break;
300 case TGSI_FILE_IMMEDIATE:
301 {
302 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
303 int offset_reg = emit_li_offset(gen, offset);
304 dst_vec = ppc_allocate_vec_register(gen->f);
305 /* Load 4-byte word into vector register.
306 * The vector slot depends on the effective address we load from.
307 * We know that our immediates start at a 16-byte boundary so we
308 * know that 'swizzle' tells us which vector slot will have the
309 * loaded word. The other vector slots will be undefined.
310 */
311 ppc_lvewx(gen->f, dst_vec, gen->immed_reg, offset_reg);
312 /* splat word[swizzle] across the vector reg */
313 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
314 }
315 break;
316 case TGSI_FILE_CONSTANT:
317 {
318 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
319 int offset_reg = emit_li_offset(gen, offset);
320 dst_vec = ppc_allocate_vec_register(gen->f);
321 /* Load 4-byte word into vector register.
322 * The vector slot depends on the effective address we load from.
323 * We know that our constants start at a 16-byte boundary so we
324 * know that 'swizzle' tells us which vector slot will have the
325 * loaded word. The other vector slots will be undefined.
326 */
327 ppc_lvewx(gen->f, dst_vec, gen->const_reg, offset_reg);
328 /* splat word[swizzle] across the vector reg */
329 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
330 }
331 break;
332 default:
333 assert( 0 );
334 }
335 break;
336 case TGSI_EXTSWIZZLE_ZERO:
337 ppc_vzero(gen->f, dst_vec);
338 break;
339 case TGSI_EXTSWIZZLE_ONE:
340 {
341 int one_vec = gen_one_vec(gen);
342 dst_vec = ppc_allocate_vec_register(gen->f);
343 ppc_vmove(gen->f, dst_vec, one_vec);
344 }
345 break;
346 default:
347 assert( 0 );
348 }
349
350 assert(dst_vec >= 0);
351
352 {
353 uint sign_op = tgsi_util_get_full_src_register_sign_mode(reg, chan_index);
354 if (sign_op != TGSI_UTIL_SIGN_KEEP) {
355 int bit31_vec = gen_get_bit31_vec(gen);
356
357 switch (sign_op) {
358 case TGSI_UTIL_SIGN_CLEAR:
359 /* vec = vec & ~bit31 */
360 ppc_vandc(gen->f, dst_vec, dst_vec, bit31_vec);
361 break;
362 case TGSI_UTIL_SIGN_SET:
363 /* vec = vec | bit31 */
364 ppc_vor(gen->f, dst_vec, dst_vec, bit31_vec);
365 break;
366 case TGSI_UTIL_SIGN_TOGGLE:
367 /* vec = vec ^ bit31 */
368 ppc_vxor(gen->f, dst_vec, dst_vec, bit31_vec);
369 break;
370 default:
371 assert(0);
372 }
373 }
374 }
375
376 return dst_vec;
377 }
378
379
380
381 /**
382 * Test if two TGSI src registers refer to the same memory location.
383 * We use this to avoid redundant register loads.
384 */
385 static boolean
386 equal_src_locs(const struct tgsi_full_src_register *a, uint chan_a,
387 const struct tgsi_full_src_register *b, uint chan_b)
388 {
389 int swz_a, swz_b;
390 int sign_a, sign_b;
391 if (a->SrcRegister.File != b->SrcRegister.File)
392 return FALSE;
393 if (a->SrcRegister.Index != b->SrcRegister.Index)
394 return FALSE;
395 swz_a = tgsi_util_get_full_src_register_extswizzle(a, chan_a);
396 swz_b = tgsi_util_get_full_src_register_extswizzle(b, chan_b);
397 if (swz_a != swz_b)
398 return FALSE;
399 sign_a = tgsi_util_get_full_src_register_sign_mode(a, chan_a);
400 sign_b = tgsi_util_get_full_src_register_sign_mode(b, chan_b);
401 if (sign_a != sign_b)
402 return FALSE;
403 return TRUE;
404 }
405
406
407 /**
408 * Given a TGSI src register and channel index, return the PPC vector
409 * register containing the value. We use a cache to prevent re-loading
410 * the same register multiple times.
411 * \return index of PPC vector register with the desired src operand
412 */
413 static int
414 get_src_vec(struct gen_context *gen,
415 struct tgsi_full_instruction *inst, int src_reg, uint chan)
416 {
417 const const struct tgsi_full_src_register *src =
418 &inst->FullSrcRegisters[src_reg];
419 int vec;
420 uint i;
421
422 /* check the cache */
423 for (i = 0; i < gen->num_regs; i++) {
424 if (equal_src_locs(&gen->regs[i].src, gen->regs[i].chan, src, chan)) {
425 /* cache hit */
426 assert(gen->regs[i].vec >= 0);
427 return gen->regs[i].vec;
428 }
429 }
430
431 /* cache miss: allocate new vec reg and emit fetch/load code */
432 vec = emit_fetch(gen, src, chan);
433 gen->regs[gen->num_regs].src = *src;
434 gen->regs[gen->num_regs].chan = chan;
435 gen->regs[gen->num_regs].vec = vec;
436 gen->num_regs++;
437
438 assert(gen->num_regs <= Elements(gen->regs));
439
440 assert(vec >= 0);
441
442 return vec;
443 }
444
445
446 /**
447 * Clear the src operand cache. To be called at the end of each emit function.
448 */
449 static void
450 release_src_vecs(struct gen_context *gen)
451 {
452 uint i;
453 for (i = 0; i < gen->num_regs; i++) {
454 const const struct tgsi_full_src_register src = gen->regs[i].src;
455 if (!(src.SrcRegister.File == TGSI_FILE_TEMPORARY &&
456 src.SrcRegister.Index < MAX_PPC_TEMPS)) {
457 ppc_release_vec_register(gen->f, gen->regs[i].vec);
458 }
459 }
460 gen->num_regs = 0;
461 }
462
463
464
465 static int
466 get_dst_vec(struct gen_context *gen,
467 const struct tgsi_full_instruction *inst,
468 unsigned chan_index)
469 {
470 const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];
471
472 if (reg->DstRegister.File == TGSI_FILE_TEMPORARY &&
473 reg->DstRegister.Index < MAX_PPC_TEMPS) {
474 int vec = gen->temps_map[reg->DstRegister.Index][chan_index];
475 return vec;
476 }
477 else {
478 return ppc_allocate_vec_register(gen->f);
479 }
480 }
481
482
483 /**
484 * Register store. Store 'src_vec' at location indicated by 'reg'.
485 * \param free_vec Should the src_vec be released when done?
486 */
487 static void
488 emit_store(struct gen_context *gen,
489 int src_vec,
490 const struct tgsi_full_instruction *inst,
491 unsigned chan_index,
492 boolean free_vec)
493 {
494 const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];
495
496 switch (reg->DstRegister.File) {
497 case TGSI_FILE_OUTPUT:
498 {
499 int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
500 int offset_reg = emit_li_offset(gen, offset);
501 ppc_stvx(gen->f, src_vec, gen->outputs_reg, offset_reg);
502 }
503 break;
504 case TGSI_FILE_TEMPORARY:
505 if (reg->DstRegister.Index < MAX_PPC_TEMPS) {
506 if (!free_vec) {
507 int dst_vec = gen->temps_map[reg->DstRegister.Index][chan_index];
508 if (dst_vec != src_vec)
509 ppc_vmove(gen->f, dst_vec, src_vec);
510 }
511 free_vec = FALSE;
512 }
513 else {
514 int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
515 int offset_reg = emit_li_offset(gen, offset);
516 ppc_stvx(gen->f, src_vec, gen->temps_reg, offset_reg);
517 }
518 break;
519 #if 0
520 case TGSI_FILE_ADDRESS:
521 emit_addrs(
522 func,
523 xmm,
524 reg->DstRegister.Index,
525 chan_index );
526 break;
527 #endif
528 default:
529 assert( 0 );
530 }
531
532 #if 0
533 switch( inst->Instruction.Saturate ) {
534 case TGSI_SAT_NONE:
535 break;
536
537 case TGSI_SAT_ZERO_ONE:
538 /* assert( 0 ); */
539 break;
540
541 case TGSI_SAT_MINUS_PLUS_ONE:
542 assert( 0 );
543 break;
544 }
545 #endif
546
547 if (free_vec)
548 ppc_release_vec_register(gen->f, src_vec);
549 }
550
551
552 static void
553 emit_scalar_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
554 {
555 int v0, v1;
556 uint chan_index;
557
558 v0 = get_src_vec(gen, inst, 0, CHAN_X);
559 v1 = ppc_allocate_vec_register(gen->f);
560
561 switch (inst->Instruction.Opcode) {
562 case TGSI_OPCODE_RSQ:
563 /* v1 = 1.0 / sqrt(v0) */
564 ppc_vrsqrtefp(gen->f, v1, v0);
565 break;
566 case TGSI_OPCODE_RCP:
567 /* v1 = 1.0 / v0 */
568 ppc_vrefp(gen->f, v1, v0);
569 break;
570 default:
571 assert(0);
572 }
573
574 FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
575 emit_store(gen, v1, inst, chan_index, FALSE);
576 }
577
578 release_src_vecs(gen);
579 ppc_release_vec_register(gen->f, v1);
580 }
581
582
583 static void
584 emit_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
585 {
586 uint chan_index;
587 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
588 int v0 = get_src_vec(gen, inst, 0, chan_index); /* v0 = srcreg[0] */
589 int v1 = get_dst_vec(gen, inst, chan_index);
590 switch (inst->Instruction.Opcode) {
591 case TGSI_OPCODE_ABS:
592 /* turn off the most significant bit of each vector float word */
593 {
594 int bit31_vec = gen_get_bit31_vec(gen);
595 ppc_vandc(gen->f, v1, v0, bit31_vec); /* v1 = v0 & ~bit31 */
596 }
597 break;
598 case TGSI_OPCODE_FLOOR:
599 ppc_vrfim(gen->f, v1, v0); /* v1 = floor(v0) */
600 break;
601 case TGSI_OPCODE_FRAC:
602 ppc_vrfim(gen->f, v1, v0); /* tmp = floor(v0) */
603 ppc_vsubfp(gen->f, v1, v0, v1); /* v1 = v0 - v1 */
604 break;
605 case TGSI_OPCODE_EXPBASE2:
606 ppc_vexptefp(gen->f, v1, v0); /* v1 = 2^v0 */
607 break;
608 case TGSI_OPCODE_LOGBASE2:
609 /* XXX this may be broken! */
610 ppc_vlogefp(gen->f, v1, v0); /* v1 = log2(v0) */
611 break;
612 case TGSI_OPCODE_MOV:
613 if (v0 != v1)
614 ppc_vmove(gen->f, v1, v0);
615 break;
616 default:
617 assert(0);
618 }
619 emit_store(gen, v1, inst, chan_index, TRUE); /* store v0 */
620 }
621
622 release_src_vecs(gen);
623 }
624
625
626 static void
627 emit_binop(struct gen_context *gen, struct tgsi_full_instruction *inst)
628 {
629 int zero_vec = -1;
630 uint chan;
631
632 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL) {
633 zero_vec = ppc_allocate_vec_register(gen->f);
634 ppc_vzero(gen->f, zero_vec);
635 }
636
637 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
638 /* fetch src operands */
639 int v0 = get_src_vec(gen, inst, 0, chan);
640 int v1 = get_src_vec(gen, inst, 1, chan);
641 int v2 = get_dst_vec(gen, inst, chan);
642
643 /* emit binop */
644 switch (inst->Instruction.Opcode) {
645 case TGSI_OPCODE_ADD:
646 ppc_vaddfp(gen->f, v2, v0, v1);
647 break;
648 case TGSI_OPCODE_SUB:
649 ppc_vsubfp(gen->f, v2, v0, v1);
650 break;
651 case TGSI_OPCODE_MUL:
652 ppc_vmaddfp(gen->f, v2, v0, v1, zero_vec);
653 break;
654 case TGSI_OPCODE_MIN:
655 ppc_vminfp(gen->f, v2, v0, v1);
656 break;
657 case TGSI_OPCODE_MAX:
658 ppc_vmaxfp(gen->f, v2, v0, v1);
659 break;
660 default:
661 assert(0);
662 }
663
664 /* store v2 */
665 emit_store(gen, v2, inst, chan, TRUE);
666 }
667
668 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL)
669 ppc_release_vec_register(gen->f, zero_vec);
670
671 release_src_vecs(gen);
672 }
673
674
675 static void
676 emit_triop(struct gen_context *gen, struct tgsi_full_instruction *inst)
677 {
678 uint chan;
679
680 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
681 /* fetch src operands */
682 int v0 = get_src_vec(gen, inst, 0, chan);
683 int v1 = get_src_vec(gen, inst, 1, chan);
684 int v2 = get_src_vec(gen, inst, 2, chan);
685 int v3 = get_dst_vec(gen, inst, chan);
686
687 /* emit ALU */
688 switch (inst->Instruction.Opcode) {
689 case TGSI_OPCODE_MAD:
690 ppc_vmaddfp(gen->f, v3, v0, v1, v2); /* v3 = v0 * v1 + v2 */
691 break;
692 case TGSI_OPCODE_LRP:
693 ppc_vsubfp(gen->f, v3, v1, v2); /* v3 = v1 - v2 */
694 ppc_vmaddfp(gen->f, v3, v0, v3, v2); /* v3 = v0 * v3 + v2 */
695 break;
696 default:
697 assert(0);
698 }
699
700 /* store v3 */
701 emit_store(gen, v3, inst, chan, TRUE);
702 }
703
704 release_src_vecs(gen);
705 }
706
707
708 /**
709 * Vector comparisons, resulting in 1.0 or 0.0 values.
710 */
711 static void
712 emit_inequality(struct gen_context *gen, struct tgsi_full_instruction *inst)
713 {
714 uint chan;
715 int one_vec = gen_one_vec(gen);
716
717 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
718 /* fetch src operands */
719 int v0 = get_src_vec(gen, inst, 0, chan);
720 int v1 = get_src_vec(gen, inst, 1, chan);
721 int v2 = get_dst_vec(gen, inst, chan);
722 boolean complement = FALSE;
723
724 switch (inst->Instruction.Opcode) {
725 case TGSI_OPCODE_SNE:
726 complement = TRUE;
727 /* fall-through */
728 case TGSI_OPCODE_SEQ:
729 ppc_vcmpeqfpx(gen->f, v2, v0, v1); /* v2 = v0 == v1 ? ~0 : 0 */
730 break;
731
732 case TGSI_OPCODE_SGE:
733 complement = TRUE;
734 /* fall-through */
735 case TGSI_OPCODE_SLT:
736 ppc_vcmpgtfpx(gen->f, v2, v1, v0); /* v2 = v1 > v0 ? ~0 : 0 */
737 break;
738
739 case TGSI_OPCODE_SLE:
740 complement = TRUE;
741 /* fall-through */
742 case TGSI_OPCODE_SGT:
743 ppc_vcmpgtfpx(gen->f, v2, v0, v1); /* v2 = v0 > v1 ? ~0 : 0 */
744 break;
745 default:
746 assert(0);
747 }
748
749 /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
750
751 if (complement)
752 ppc_vandc(gen->f, v2, one_vec, v2); /* v2 = one_vec & ~v2 */
753 else
754 ppc_vand(gen->f, v2, one_vec, v2); /* v2 = one_vec & v2 */
755
756 /* store v2 */
757 emit_store(gen, v2, inst, chan, TRUE);
758 }
759
760 release_src_vecs(gen);
761 }
762
763
764 static void
765 emit_dotprod(struct gen_context *gen, struct tgsi_full_instruction *inst)
766 {
767 int v0, v1, v2;
768 uint chan_index;
769
770 v2 = ppc_allocate_vec_register(gen->f);
771
772 ppc_vxor(gen->f, v2, v2, v2); /* v2 = {0, 0, 0, 0} */
773
774 v0 = get_src_vec(gen, inst, 0, CHAN_X); /* v0 = src0.XXXX */
775 v1 = get_src_vec(gen, inst, 1, CHAN_X); /* v1 = src1.XXXX */
776 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
777
778 v0 = get_src_vec(gen, inst, 0, CHAN_Y); /* v0 = src0.YYYY */
779 v1 = get_src_vec(gen, inst, 1, CHAN_Y); /* v1 = src1.YYYY */
780 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
781
782 v0 = get_src_vec(gen, inst, 0, CHAN_Z); /* v0 = src0.ZZZZ */
783 v1 = get_src_vec(gen, inst, 1, CHAN_Z); /* v1 = src1.ZZZZ */
784 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
785
786 if (inst->Instruction.Opcode == TGSI_OPCODE_DP4) {
787 v0 = get_src_vec(gen, inst, 0, CHAN_W); /* v0 = src0.WWWW */
788 v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
789 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
790 }
791 else if (inst->Instruction.Opcode == TGSI_OPCODE_DPH) {
792 v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
793 ppc_vaddfp(gen->f, v2, v2, v1); /* v2 = v2 + v1 */
794 }
795
796 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
797 emit_store(gen, v2, inst, chan_index, FALSE); /* store v2, free v2 later */
798 }
799
800 release_src_vecs(gen);
801
802 ppc_release_vec_register(gen->f, v2);
803 }
804
805
/** Approximation for vr = pow(va, vb), computed as exp2(log2(va) * vb). */
static void
ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
{
   int log_vec = ppc_allocate_vec_register(f);
   int zero_vec = ppc_allocate_vec_register(f);

   ppc_vzero(f, zero_vec);

   ppc_vlogefp(f, log_vec, va);                    /* log = log2(va) */
   ppc_vmaddfp(f, log_vec, log_vec, vb, zero_vec); /* log = log * vb + 0 */
   ppc_vexptefp(f, vr, log_vec);                   /* vr = 2^log */

   ppc_release_vec_register(f, log_vec);
   ppc_release_vec_register(f, zero_vec);
}
823
824
/**
 * Emit TGSI_OPCODE_LIT:
 *   dst.x = 1
 *   dst.y = max(src.x, 0)
 *   dst.z = src.x > 0 ? pow(max(src.y,0), clamp(src.w,-128,128)) : 0
 *   dst.w = 1
 */
static void
emit_lit(struct gen_context *gen, struct tgsi_full_instruction *inst)
{
   int one_vec = gen_one_vec(gen);

   /* Compute X: always 1.0 */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
      emit_store(gen, one_vec, inst, CHAN_X, FALSE);
   }

   /* Compute Y, Z */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
       IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
      int x_vec;
      int zero_vec = ppc_allocate_vec_register(gen->f);

      x_vec = get_src_vec(gen, inst, 0, CHAN_X);   /* x_vec = src[0].x */

      ppc_vzero(gen->f, zero_vec);                 /* zero = {0,0,0,0} */
      ppc_vmaxfp(gen->f, x_vec, x_vec, zero_vec);  /* x_vec = max(x_vec, 0) */

      /* Y = max(src.x, 0) */
      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
         emit_store(gen, x_vec, inst, CHAN_Y, FALSE);
      }

      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
         int y_vec, w_vec;
         int z_vec = ppc_allocate_vec_register(gen->f);
         int pow_vec = ppc_allocate_vec_register(gen->f);
         int pos_vec = ppc_allocate_vec_register(gen->f);
         int p128_vec = ppc_allocate_vec_register(gen->f);
         int n128_vec = ppc_allocate_vec_register(gen->f);

         y_vec = get_src_vec(gen, inst, 0, CHAN_Y);   /* y_vec = src[0].y */
         ppc_vmaxfp(gen->f, y_vec, y_vec, zero_vec);  /* y_vec = max(y_vec, 0) */

         w_vec = get_src_vec(gen, inst, 0, CHAN_W);   /* w_vec = src[0].w */

         /* clamp W to [-128, 128] (the traditional specular-exponent range) */
         load_constant_vec(gen, p128_vec, 128.0f);
         load_constant_vec(gen, n128_vec, -128.0f);
         ppc_vmaxfp(gen->f, w_vec, w_vec, n128_vec);  /* w = max(w, -128) */
         ppc_vminfp(gen->f, w_vec, w_vec, p128_vec);  /* w = min(w, 128) */

         /* if temp.x > 0
          *    z = pow(tmp.y, tmp.w)
          * else
          *    z = 0.0
          * (the compare result is an all-ones/all-zeros mask, so a
          * bitwise AND selects the pow result or zero)
          */
         ppc_vec_pow(gen->f, pow_vec, y_vec, w_vec);      /* pow = pow(y, w) */
         ppc_vcmpgtfpx(gen->f, pos_vec, x_vec, zero_vec); /* pos = x > 0 */
         ppc_vand(gen->f, z_vec, pow_vec, pos_vec);       /* z = pow & pos */

         emit_store(gen, z_vec, inst, CHAN_Z, FALSE);

         ppc_release_vec_register(gen->f, z_vec);
         ppc_release_vec_register(gen->f, pow_vec);
         ppc_release_vec_register(gen->f, pos_vec);
         ppc_release_vec_register(gen->f, p128_vec);
         ppc_release_vec_register(gen->f, n128_vec);
      }

      ppc_release_vec_register(gen->f, zero_vec);
   }

   /* Compute W: always 1.0 */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
      emit_store(gen, one_vec, inst, CHAN_W, FALSE);
   }

   release_src_vecs(gen);
}
897
898
/**
 * Dispatch one TGSI instruction to the appropriate emitter.
 * \return 1 on success (including END), 0 if the opcode is unsupported
 */
static int
emit_instruction(struct gen_context *gen,
                 struct tgsi_full_instruction *inst)
{
   switch (inst->Instruction.Opcode) {
   /* per-channel unary ops */
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_ABS:
   case TGSI_OPCODE_FLOOR:
   case TGSI_OPCODE_FRAC:
   case TGSI_OPCODE_EXPBASE2:
   case TGSI_OPCODE_LOGBASE2:
      emit_unaryop(gen, inst);
      break;
   /* scalar ops computed once and replicated to all channels */
   case TGSI_OPCODE_RSQ:
   case TGSI_OPCODE_RCP:
      emit_scalar_unaryop(gen, inst);
      break;
   /* per-channel binary ops */
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_SUB:
   case TGSI_OPCODE_MUL:
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_MAX:
      emit_binop(gen, inst);
      break;
   /* comparisons producing 1.0/0.0 */
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SGE:
      emit_inequality(gen, inst);
      break;
   /* per-channel ternary ops */
   case TGSI_OPCODE_MAD:
   case TGSI_OPCODE_LRP:
      emit_triop(gen, inst);
      break;
   /* dot products */
   case TGSI_OPCODE_DP3:
   case TGSI_OPCODE_DP4:
   case TGSI_OPCODE_DPH:
      emit_dotprod(gen, inst);
      break;
   case TGSI_OPCODE_LIT:
      emit_lit(gen, inst);
      break;
   case TGSI_OPCODE_END:
      /* normal end */
      return 1;
   default:
      /* unsupported opcode: fall back to the interpreter */
      return 0;
   }
   return 1;
}
951
952
/**
 * Process a TGSI declaration token.
 * NOTE: the interpolation code below is disabled (#if 0) -- it is SSE-era
 * reference code that has not been ported to PPC yet, so fragment-shader
 * input declarations currently emit nothing.
 */
static void
emit_declaration(
   struct ppc_function *func,
   struct tgsi_full_declaration *decl )
{
   if( decl->Declaration.File == TGSI_FILE_INPUT ) {
#if 0
      unsigned first, last, mask;
      unsigned i, j;

      first = decl->DeclarationRange.First;
      last = decl->DeclarationRange.Last;
      mask = decl->Declaration.UsageMask;

      for( i = first; i <= last; i++ ) {
         for( j = 0; j < NUM_CHANNELS; j++ ) {
            if( mask & (1 << j) ) {
               switch( decl->Declaration.Interpolate ) {
               case TGSI_INTERPOLATE_CONSTANT:
                  emit_coef_a0( func, 0, i, j );
                  emit_inputs( func, 0, i, j );
                  break;

               case TGSI_INTERPOLATE_LINEAR:
                  emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
                  emit_coef_dadx( func, 1, i, j );
                  emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
                  emit_coef_dady( func, 3, i, j );
                  emit_mul( func, 0, 1 );    /* x * dadx */
                  emit_coef_a0( func, 4, i, j );
                  emit_mul( func, 2, 3 );    /* y * dady */
                  emit_add( func, 0, 4 );    /* x * dadx + a0 */
                  emit_add( func, 0, 2 );    /* x * dadx + y * dady + a0 */
                  emit_inputs( func, 0, i, j );
                  break;

               case TGSI_INTERPOLATE_PERSPECTIVE:
                  emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
                  emit_coef_dadx( func, 1, i, j );
                  emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
                  emit_coef_dady( func, 3, i, j );
                  emit_mul( func, 0, 1 );    /* x * dadx */
                  emit_tempf( func, 4, 0, TGSI_SWIZZLE_W );
                  emit_coef_a0( func, 5, i, j );
                  emit_rcp( func, 4, 4 );    /* 1.0 / w */
                  emit_mul( func, 2, 3 );    /* y * dady */
                  emit_add( func, 0, 5 );    /* x * dadx + a0 */
                  emit_add( func, 0, 2 );    /* x * dadx + y * dady + a0 */
                  emit_mul( func, 0, 4 );    /* (x * dadx + y * dady + a0) / w */
                  emit_inputs( func, 0, i, j );
                  break;

               default:
                  assert( 0 );
                  break;
               }
            }
         }
      }
#endif
   }
}
1015
1016
1017
/**
 * Emit the function prologue.  Currently a no-op: no stack frame is
 * established yet.
 */
static void
emit_prologue(struct ppc_function *func)
{
   /* XXX set up stack frame */
}
1023
1024
/**
 * Emit the function epilogue: the return instruction, plus a debug
 * report of how many instructions were generated.
 */
static void
emit_epilogue(struct ppc_function *func)
{
   ppc_return(func);
   /* XXX restore prev stack frame */
   debug_printf("PPC: Emitted %u instructions\n", func->num_inst);
}
1032
1033
1034
1035 /**
1036 * Translate a TGSI vertex/fragment shader to PPC code.
1037 *
1038 * \param tokens the TGSI input shader
1039 * \param func the output PPC code/function
1040 * \param immediates buffer to place immediates, later passed to PPC func
1041 * \return TRUE for success, FALSE if translation failed
1042 */
1043 boolean
1044 tgsi_emit_ppc(const struct tgsi_token *tokens,
1045 struct ppc_function *func,
1046 float (*immediates)[4],
1047 boolean do_swizzles )
1048 {
1049 static int use_ppc_asm = -1;
1050 struct tgsi_parse_context parse;
1051 /*boolean instruction_phase = FALSE;*/
1052 unsigned ok = 1;
1053 uint num_immediates = 0;
1054 struct gen_context gen;
1055
1056 if (use_ppc_asm < 0) {
1057 /* If GALLIUM_NOPPC is set, don't use PPC codegen */
1058 use_ppc_asm = !debug_get_bool_option("GALLIUM_NOPPC", FALSE);
1059 }
1060 if (!use_ppc_asm)
1061 return FALSE;
1062
1063 if (0) {
1064 debug_printf("\n********* TGSI->PPC ********\n");
1065 tgsi_dump(tokens, 0);
1066 }
1067
1068 util_init_math();
1069
1070 init_gen_context(&gen, func);
1071
1072 emit_prologue(func);
1073
1074 tgsi_parse_init( &parse, tokens );
1075
1076 while (!tgsi_parse_end_of_tokens(&parse) && ok) {
1077 tgsi_parse_token(&parse);
1078
1079 switch (parse.FullToken.Token.Type) {
1080 case TGSI_TOKEN_TYPE_DECLARATION:
1081 if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
1082 emit_declaration(func, &parse.FullToken.FullDeclaration );
1083 }
1084 break;
1085
1086 case TGSI_TOKEN_TYPE_INSTRUCTION:
1087 ok = emit_instruction(&gen, &parse.FullToken.FullInstruction);
1088
1089 if (!ok) {
1090 debug_printf("failed to translate tgsi opcode %d to PPC (%s)\n",
1091 parse.FullToken.FullInstruction.Instruction.Opcode,
1092 parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX ?
1093 "vertex shader" : "fragment shader");
1094 }
1095 break;
1096
1097 case TGSI_TOKEN_TYPE_IMMEDIATE:
1098 /* splat each immediate component into a float[4] vector for SoA */
1099 {
1100 const uint size = parse.FullToken.FullImmediate.Immediate.Size - 1;
1101 float *imm = (float *) immediates;
1102 uint i;
1103 assert(size <= 4);
1104 assert(num_immediates < TGSI_EXEC_NUM_IMMEDIATES);
1105 for (i = 0; i < size; i++) {
1106 immediates[num_immediates][i] =
1107 parse.FullToken.FullImmediate.u.ImmediateFloat32[i].Float;
1108 }
1109 num_immediates++;
1110 }
1111 break;
1112
1113 default:
1114 ok = 0;
1115 assert( 0 );
1116 }
1117 }
1118
1119 emit_epilogue(func);
1120
1121 tgsi_parse_free( &parse );
1122
1123 return ok;
1124 }
1125
1126 #endif /* PIPE_ARCH_PPC */