Merge branch 'asm-shader-rework-2'
[mesa.git] / src / gallium / auxiliary / tgsi / tgsi_ppc.c
1 /**************************************************************************
2 *
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * TGSI to PowerPC code generation.
30 */
31
32 #include "pipe/p_config.h"
33
34 #if defined(PIPE_ARCH_PPC)
35
36 #include "util/u_debug.h"
37 #include "pipe/p_shader_tokens.h"
38 #include "util/u_math.h"
39 #include "util/u_memory.h"
40 #include "util/u_sse.h"
41 #include "tgsi/tgsi_info.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi_dump.h"
45 #include "tgsi_exec.h"
46 #include "tgsi_ppc.h"
47 #include "rtasm/rtasm_ppc.h"
48
49
50 /**
51 * Since it's pretty much impossible to form PPC vector immediates, load
52 * them from memory here:
53 */
54 const float ppc_builtin_constants[] ALIGN16_ATTRIB = {
55 1.0f, -128.0f, 128.0, 0.0
56 };
57
58
/* Iterate CHAN over all four vector channels (X, Y, Z, W). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* Is channel CHAN of instruction INST's dest register 0 written? */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))

/* Guard a statement so it runs only for enabled dest channels. */
#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate CHAN over only the write-enabled channels of dest reg 0. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
   IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* Channel indexes */
#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3


/**
 * How many TGSI temps should be implemented with real PPC vector registers
 * rather than memory.
 */
#define MAX_PPC_TEMPS 3
83
84
/**
 * Context/state used during code gen.
 */
struct gen_context
{
   struct ppc_function *f;
   int inputs_reg;    /**< GP register pointing to input params */
   int outputs_reg;   /**< GP register pointing to output params */
   int temps_reg;     /**< GP register pointing to temporary "registers" */
   int immed_reg;     /**< GP register pointing to immediates buffer */
   int const_reg;     /**< GP register pointing to constants buffer */
   int builtins_reg;  /**< GP register pointing to built-in constants */

   int offset_reg;    /**< used to reduce redundant li instructions */
   int offset_value;  /**< value last loaded into offset_reg; see emit_li_offset() */

   int one_vec;       /**< vector register with {1.0, 1.0, 1.0, 1.0} */
   int bit31_vec;     /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */

   /**
    * Map TGSI temps to PPC vector temps.
    * We have 32 PPC vector regs.  Use 12 of them for storing 3 TGSI temps
    * (MAX_PPC_TEMPS temps, four channels each).
    * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
    */
   int temps_map[MAX_PPC_TEMPS][4];

   /**
    * Cache of src registers.
    * This is used to avoid redundant load instructions.
    */
   struct {
      struct tgsi_full_src_register src;  /* the TGSI register loaded */
      uint chan;                          /* which channel was loaded */
      uint vec;                           /* PPC vec register holding it */
   } regs[12]; /* 3 src regs, 4 channels */
   uint num_regs;     /* number of cache entries currently in use */
};
122
123
124 /**
125 * Initialize code generation context.
126 */
127 static void
128 init_gen_context(struct gen_context *gen, struct ppc_function *func)
129 {
130 uint i;
131
132 memset(gen, 0, sizeof(*gen));
133 gen->f = func;
134 gen->inputs_reg = ppc_reserve_register(func, 3); /* first function param */
135 gen->outputs_reg = ppc_reserve_register(func, 4); /* second function param */
136 gen->temps_reg = ppc_reserve_register(func, 5); /* ... */
137 gen->immed_reg = ppc_reserve_register(func, 6);
138 gen->const_reg = ppc_reserve_register(func, 7);
139 gen->builtins_reg = ppc_reserve_register(func, 8);
140 gen->one_vec = -1;
141 gen->bit31_vec = -1;
142 gen->offset_reg = -1;
143 gen->offset_value = -9999999;
144 for (i = 0; i < MAX_PPC_TEMPS; i++) {
145 gen->temps_map[i][0] = ppc_allocate_vec_register(gen->f);
146 gen->temps_map[i][1] = ppc_allocate_vec_register(gen->f);
147 gen->temps_map[i][2] = ppc_allocate_vec_register(gen->f);
148 gen->temps_map[i][3] = ppc_allocate_vec_register(gen->f);
149 }
150 }
151
152
153 /**
154 * Is the given TGSI register stored as a real PPC vector register?
155 */
156 static boolean
157 is_ppc_vec_temporary(const struct tgsi_full_src_register *reg)
158 {
159 return (reg->SrcRegister.File == TGSI_FILE_TEMPORARY &&
160 reg->SrcRegister.Index < MAX_PPC_TEMPS);
161 }
162
163
164 /**
165 * Is the given TGSI register stored as a real PPC vector register?
166 */
167 static boolean
168 is_ppc_vec_temporary_dst(const struct tgsi_full_dst_register *reg)
169 {
170 return (reg->DstRegister.File == TGSI_FILE_TEMPORARY &&
171 reg->DstRegister.Index < MAX_PPC_TEMPS);
172 }
173
174
175
176 /**
177 * All PPC vector load/store instructions form an effective address
178 * by adding the contents of two registers. For example:
179 * lvx v2,r8,r9 # v2 = memory[r8 + r9]
180 * stvx v2,r8,r9 # memory[r8 + r9] = v2;
181 * So our lvx/stvx instructions are typically preceded by an 'li' instruction
182 * to load r9 (above) with an immediate (an offset).
183 * This code emits that 'li' instruction, but only if the offset value is
184 * different than the previous 'li'.
185 * This optimization seems to save about 10% in the instruction count.
186 * Note that we need to unconditionally emit an 'li' inside basic blocks
187 * (such as inside loops).
188 */
189 static int
190 emit_li_offset(struct gen_context *gen, int offset)
191 {
192 if (gen->offset_reg <= 0) {
193 /* allocate a GP register for storing load/store offset */
194 gen->offset_reg = ppc_allocate_register(gen->f);
195 }
196
197 /* emit new 'li' if offset is changing */
198 if (gen->offset_value < 0 || gen->offset_value != offset) {
199 gen->offset_value = offset;
200 ppc_li(gen->f, gen->offset_reg, offset);
201 }
202
203 return gen->offset_reg;
204 }
205
206
/**
 * Forces subsequent emit_li_offset() calls to emit an 'li'.
 * To be called at the top of basic blocks (e.g. loop bodies), where the
 * offset register's contents can no longer be assumed valid.
 */
static void
reset_li_offset(struct gen_context *gen)
{
   /* an offset no real operand can produce, so the next compare fails */
   gen->offset_value = -9999999;
}
216
217
218
219 /**
220 * Load the given vector register with {value, value, value, value}.
221 * The value must be in the ppu_builtin_constants[] array.
222 * We wouldn't need this if there was a simple way to load PPC vector
223 * registers with immediate values!
224 */
225 static void
226 load_constant_vec(struct gen_context *gen, int dst_vec, float value)
227 {
228 uint pos;
229 for (pos = 0; pos < Elements(ppc_builtin_constants); pos++) {
230 if (ppc_builtin_constants[pos] == value) {
231 int offset = pos * 4;
232 int offset_reg = emit_li_offset(gen, offset);
233
234 /* Load 4-byte word into vector register.
235 * The vector slot depends on the effective address we load from.
236 * We know that our builtins start at a 16-byte boundary so we
237 * know that 'swizzle' tells us which vector slot will have the
238 * loaded word. The other vector slots will be undefined.
239 */
240 ppc_lvewx(gen->f, dst_vec, gen->builtins_reg, offset_reg);
241 /* splat word[pos % 4] across the vector reg */
242 ppc_vspltw(gen->f, dst_vec, dst_vec, pos % 4);
243 return;
244 }
245 }
246 assert(0 && "Need to add new constant to ppc_builtin_constants array");
247 }
248
249
250 /**
251 * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
252 */
253 static int
254 gen_one_vec(struct gen_context *gen)
255 {
256 if (gen->one_vec < 0) {
257 gen->one_vec = ppc_allocate_vec_register(gen->f);
258 load_constant_vec(gen, gen->one_vec, 1.0f);
259 }
260 return gen->one_vec;
261 }
262
/**
 * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}
 * (the float sign-bit mask), creating it on first use.
 */
static int
gen_get_bit31_vec(struct gen_context *gen)
{
   if (gen->bit31_vec < 0) {
      gen->bit31_vec = ppc_allocate_vec_register(gen->f);
      /* splat -1 (all bits set) into each 32-bit word */
      ppc_vspltisw(gen->f, gen->bit31_vec, -1);
      /* shift each word left by itself: vslw uses only the low 5 bits of
       * the shift operand, and the low 5 bits of 0xffffffff are 31 — so
       * each word becomes 1<<31, i.e. only the sign bit remains set */
      ppc_vslw(gen->f, gen->bit31_vec, gen->bit31_vec, gen->bit31_vec);
   }
   return gen->bit31_vec;
}
276
277
278 /**
279 * Register fetch. Return PPC vector register with result.
280 */
281 static int
282 emit_fetch(struct gen_context *gen,
283 const struct tgsi_full_src_register *reg,
284 const unsigned chan_index)
285 {
286 uint swizzle = tgsi_util_get_full_src_register_extswizzle(reg, chan_index);
287 int dst_vec = -1;
288
289 switch (swizzle) {
290 case TGSI_EXTSWIZZLE_X:
291 case TGSI_EXTSWIZZLE_Y:
292 case TGSI_EXTSWIZZLE_Z:
293 case TGSI_EXTSWIZZLE_W:
294 switch (reg->SrcRegister.File) {
295 case TGSI_FILE_INPUT:
296 {
297 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
298 int offset_reg = emit_li_offset(gen, offset);
299 dst_vec = ppc_allocate_vec_register(gen->f);
300 ppc_lvx(gen->f, dst_vec, gen->inputs_reg, offset_reg);
301 }
302 break;
303 case TGSI_FILE_TEMPORARY:
304 if (is_ppc_vec_temporary(reg)) {
305 /* use PPC vec register */
306 dst_vec = gen->temps_map[reg->SrcRegister.Index][swizzle];
307 }
308 else {
309 /* use memory-based temp register "file" */
310 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
311 int offset_reg = emit_li_offset(gen, offset);
312 dst_vec = ppc_allocate_vec_register(gen->f);
313 ppc_lvx(gen->f, dst_vec, gen->temps_reg, offset_reg);
314 }
315 break;
316 case TGSI_FILE_IMMEDIATE:
317 {
318 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
319 int offset_reg = emit_li_offset(gen, offset);
320 dst_vec = ppc_allocate_vec_register(gen->f);
321 /* Load 4-byte word into vector register.
322 * The vector slot depends on the effective address we load from.
323 * We know that our immediates start at a 16-byte boundary so we
324 * know that 'swizzle' tells us which vector slot will have the
325 * loaded word. The other vector slots will be undefined.
326 */
327 ppc_lvewx(gen->f, dst_vec, gen->immed_reg, offset_reg);
328 /* splat word[swizzle] across the vector reg */
329 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
330 }
331 break;
332 case TGSI_FILE_CONSTANT:
333 {
334 int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
335 int offset_reg = emit_li_offset(gen, offset);
336 dst_vec = ppc_allocate_vec_register(gen->f);
337 /* Load 4-byte word into vector register.
338 * The vector slot depends on the effective address we load from.
339 * We know that our constants start at a 16-byte boundary so we
340 * know that 'swizzle' tells us which vector slot will have the
341 * loaded word. The other vector slots will be undefined.
342 */
343 ppc_lvewx(gen->f, dst_vec, gen->const_reg, offset_reg);
344 /* splat word[swizzle] across the vector reg */
345 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
346 }
347 break;
348 default:
349 assert( 0 );
350 }
351 break;
352 case TGSI_EXTSWIZZLE_ZERO:
353 ppc_vzero(gen->f, dst_vec);
354 break;
355 case TGSI_EXTSWIZZLE_ONE:
356 {
357 int one_vec = gen_one_vec(gen);
358 dst_vec = ppc_allocate_vec_register(gen->f);
359 ppc_vmove(gen->f, dst_vec, one_vec);
360 }
361 break;
362 default:
363 assert( 0 );
364 }
365
366 assert(dst_vec >= 0);
367
368 {
369 uint sign_op = tgsi_util_get_full_src_register_sign_mode(reg, chan_index);
370 if (sign_op != TGSI_UTIL_SIGN_KEEP) {
371 int bit31_vec = gen_get_bit31_vec(gen);
372 int dst_vec2;
373
374 if (is_ppc_vec_temporary(reg)) {
375 /* need to use a new temp */
376 dst_vec2 = ppc_allocate_vec_register(gen->f);
377 }
378 else {
379 dst_vec2 = dst_vec;
380 }
381
382 switch (sign_op) {
383 case TGSI_UTIL_SIGN_CLEAR:
384 /* vec = vec & ~bit31 */
385 ppc_vandc(gen->f, dst_vec2, dst_vec, bit31_vec);
386 break;
387 case TGSI_UTIL_SIGN_SET:
388 /* vec = vec | bit31 */
389 ppc_vor(gen->f, dst_vec2, dst_vec, bit31_vec);
390 break;
391 case TGSI_UTIL_SIGN_TOGGLE:
392 /* vec = vec ^ bit31 */
393 ppc_vxor(gen->f, dst_vec2, dst_vec, bit31_vec);
394 break;
395 default:
396 assert(0);
397 }
398 return dst_vec2;
399 }
400 }
401
402 return dst_vec;
403 }
404
405
406
407 /**
408 * Test if two TGSI src registers refer to the same memory location.
409 * We use this to avoid redundant register loads.
410 */
411 static boolean
412 equal_src_locs(const struct tgsi_full_src_register *a, uint chan_a,
413 const struct tgsi_full_src_register *b, uint chan_b)
414 {
415 int swz_a, swz_b;
416 int sign_a, sign_b;
417 if (a->SrcRegister.File != b->SrcRegister.File)
418 return FALSE;
419 if (a->SrcRegister.Index != b->SrcRegister.Index)
420 return FALSE;
421 swz_a = tgsi_util_get_full_src_register_extswizzle(a, chan_a);
422 swz_b = tgsi_util_get_full_src_register_extswizzle(b, chan_b);
423 if (swz_a != swz_b)
424 return FALSE;
425 sign_a = tgsi_util_get_full_src_register_sign_mode(a, chan_a);
426 sign_b = tgsi_util_get_full_src_register_sign_mode(b, chan_b);
427 if (sign_a != sign_b)
428 return FALSE;
429 return TRUE;
430 }
431
432
433 /**
434 * Given a TGSI src register and channel index, return the PPC vector
435 * register containing the value. We use a cache to prevent re-loading
436 * the same register multiple times.
437 * \return index of PPC vector register with the desired src operand
438 */
439 static int
440 get_src_vec(struct gen_context *gen,
441 struct tgsi_full_instruction *inst, int src_reg, uint chan)
442 {
443 const const struct tgsi_full_src_register *src =
444 &inst->FullSrcRegisters[src_reg];
445 int vec;
446 uint i;
447
448 /* check the cache */
449 for (i = 0; i < gen->num_regs; i++) {
450 if (equal_src_locs(&gen->regs[i].src, gen->regs[i].chan, src, chan)) {
451 /* cache hit */
452 assert(gen->regs[i].vec >= 0);
453 return gen->regs[i].vec;
454 }
455 }
456
457 /* cache miss: allocate new vec reg and emit fetch/load code */
458 vec = emit_fetch(gen, src, chan);
459 gen->regs[gen->num_regs].src = *src;
460 gen->regs[gen->num_regs].chan = chan;
461 gen->regs[gen->num_regs].vec = vec;
462 gen->num_regs++;
463
464 assert(gen->num_regs <= Elements(gen->regs));
465
466 assert(vec >= 0);
467
468 return vec;
469 }
470
471
472 /**
473 * Clear the src operand cache. To be called at the end of each emit function.
474 */
475 static void
476 release_src_vecs(struct gen_context *gen)
477 {
478 uint i;
479 for (i = 0; i < gen->num_regs; i++) {
480 const const struct tgsi_full_src_register src = gen->regs[i].src;
481 if (!is_ppc_vec_temporary(&src)) {
482 ppc_release_vec_register(gen->f, gen->regs[i].vec);
483 }
484 }
485 gen->num_regs = 0;
486 }
487
488
489
490 static int
491 get_dst_vec(struct gen_context *gen,
492 const struct tgsi_full_instruction *inst,
493 unsigned chan_index)
494 {
495 const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];
496
497 if (is_ppc_vec_temporary_dst(reg)) {
498 int vec = gen->temps_map[reg->DstRegister.Index][chan_index];
499 return vec;
500 }
501 else {
502 return ppc_allocate_vec_register(gen->f);
503 }
504 }
505
506
/**
 * Register store.  Store 'src_vec' at the location indicated by channel
 * 'chan_index' of dest register 0 of 'inst'.
 * \param free_vec Should the src_vec be released when done?
 *
 * NOTE(review): when free_vec is TRUE the callers in this file obtained
 * src_vec from get_dst_vec(), so for a PPC-resident TGSI temp dest it
 * already *is* the destination register and no move is needed; when
 * FALSE the shared result must be copied in.  Confirm against callers
 * before changing this invariant.
 */
static void
emit_store(struct gen_context *gen,
           int src_vec,
           const struct tgsi_full_instruction *inst,
           unsigned chan_index,
           boolean free_vec)
{
   const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];

   switch (reg->DstRegister.File) {
   case TGSI_FILE_OUTPUT:
      {
         /* each output channel occupies one 16-byte vector slot */
         int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
         int offset_reg = emit_li_offset(gen, offset);
         ppc_stvx(gen->f, src_vec, gen->outputs_reg, offset_reg);
      }
      break;
   case TGSI_FILE_TEMPORARY:
      if (is_ppc_vec_temporary_dst(reg)) {
         if (!free_vec) {
            int dst_vec = gen->temps_map[reg->DstRegister.Index][chan_index];
            if (dst_vec != src_vec)
               ppc_vmove(gen->f, dst_vec, src_vec);
         }
         /* never release a register that permanently backs a TGSI temp */
         free_vec = FALSE;
      }
      else {
         /* memory-based temp "file" */
         int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
         int offset_reg = emit_li_offset(gen, offset);
         ppc_stvx(gen->f, src_vec, gen->temps_reg, offset_reg);
      }
      break;
#if 0
   case TGSI_FILE_ADDRESS:
      emit_addrs(
         func,
         xmm,
         reg->DstRegister.Index,
         chan_index );
      break;
#endif
   default:
      assert( 0 );
   }

#if 0
   /* saturation modes are not implemented yet (see emit_instruction) */
   switch( inst->Instruction.Saturate ) {
   case TGSI_SAT_NONE:
      break;

   case TGSI_SAT_ZERO_ONE:
      /* assert( 0 ); */
      break;

   case TGSI_SAT_MINUS_PLUS_ONE:
      assert( 0 );
      break;
   }
#endif

   if (free_vec)
      ppc_release_vec_register(gen->f, src_vec);
}
574
575
576 static void
577 emit_scalar_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
578 {
579 int v0, v1;
580 uint chan_index;
581
582 v0 = get_src_vec(gen, inst, 0, CHAN_X);
583 v1 = ppc_allocate_vec_register(gen->f);
584
585 switch (inst->Instruction.Opcode) {
586 case TGSI_OPCODE_RSQ:
587 /* v1 = 1.0 / sqrt(v0) */
588 ppc_vrsqrtefp(gen->f, v1, v0);
589 break;
590 case TGSI_OPCODE_RCP:
591 /* v1 = 1.0 / v0 */
592 ppc_vrefp(gen->f, v1, v0);
593 break;
594 default:
595 assert(0);
596 }
597
598 FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
599 emit_store(gen, v1, inst, chan_index, FALSE);
600 }
601
602 release_src_vecs(gen);
603 ppc_release_vec_register(gen->f, v1);
604 }
605
606
607 static void
608 emit_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
609 {
610 uint chan_index;
611
612 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
613 int v0 = get_src_vec(gen, inst, 0, chan_index); /* v0 = srcreg[0] */
614 int v1 = get_dst_vec(gen, inst, chan_index);
615 switch (inst->Instruction.Opcode) {
616 case TGSI_OPCODE_ABS:
617 /* turn off the most significant bit of each vector float word */
618 {
619 int bit31_vec = gen_get_bit31_vec(gen);
620 ppc_vandc(gen->f, v1, v0, bit31_vec); /* v1 = v0 & ~bit31 */
621 }
622 break;
623 case TGSI_OPCODE_FLR:
624 ppc_vrfim(gen->f, v1, v0); /* v1 = floor(v0) */
625 break;
626 case TGSI_OPCODE_FRC:
627 ppc_vrfim(gen->f, v1, v0); /* tmp = floor(v0) */
628 ppc_vsubfp(gen->f, v1, v0, v1); /* v1 = v0 - v1 */
629 break;
630 case TGSI_OPCODE_EX2:
631 ppc_vexptefp(gen->f, v1, v0); /* v1 = 2^v0 */
632 break;
633 case TGSI_OPCODE_LG2:
634 /* XXX this may be broken! */
635 ppc_vlogefp(gen->f, v1, v0); /* v1 = log2(v0) */
636 break;
637 case TGSI_OPCODE_MOV:
638 case TGSI_OPCODE_SWZ:
639 if (v0 != v1)
640 ppc_vmove(gen->f, v1, v0);
641 break;
642 default:
643 assert(0);
644 }
645 emit_store(gen, v1, inst, chan_index, TRUE); /* store v0 */
646 }
647
648 release_src_vecs(gen);
649 }
650
651
652 static void
653 emit_binop(struct gen_context *gen, struct tgsi_full_instruction *inst)
654 {
655 int zero_vec = -1;
656 uint chan;
657
658 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL) {
659 zero_vec = ppc_allocate_vec_register(gen->f);
660 ppc_vzero(gen->f, zero_vec);
661 }
662
663 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
664 /* fetch src operands */
665 int v0 = get_src_vec(gen, inst, 0, chan);
666 int v1 = get_src_vec(gen, inst, 1, chan);
667 int v2 = get_dst_vec(gen, inst, chan);
668
669 /* emit binop */
670 switch (inst->Instruction.Opcode) {
671 case TGSI_OPCODE_ADD:
672 ppc_vaddfp(gen->f, v2, v0, v1);
673 break;
674 case TGSI_OPCODE_SUB:
675 ppc_vsubfp(gen->f, v2, v0, v1);
676 break;
677 case TGSI_OPCODE_MUL:
678 ppc_vmaddfp(gen->f, v2, v0, v1, zero_vec);
679 break;
680 case TGSI_OPCODE_MIN:
681 ppc_vminfp(gen->f, v2, v0, v1);
682 break;
683 case TGSI_OPCODE_MAX:
684 ppc_vmaxfp(gen->f, v2, v0, v1);
685 break;
686 default:
687 assert(0);
688 }
689
690 /* store v2 */
691 emit_store(gen, v2, inst, chan, TRUE);
692 }
693
694 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL)
695 ppc_release_vec_register(gen->f, zero_vec);
696
697 release_src_vecs(gen);
698 }
699
700
701 static void
702 emit_triop(struct gen_context *gen, struct tgsi_full_instruction *inst)
703 {
704 uint chan;
705
706 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
707 /* fetch src operands */
708 int v0 = get_src_vec(gen, inst, 0, chan);
709 int v1 = get_src_vec(gen, inst, 1, chan);
710 int v2 = get_src_vec(gen, inst, 2, chan);
711 int v3 = get_dst_vec(gen, inst, chan);
712
713 /* emit ALU */
714 switch (inst->Instruction.Opcode) {
715 case TGSI_OPCODE_MAD:
716 ppc_vmaddfp(gen->f, v3, v0, v1, v2); /* v3 = v0 * v1 + v2 */
717 break;
718 case TGSI_OPCODE_LRP:
719 ppc_vsubfp(gen->f, v3, v1, v2); /* v3 = v1 - v2 */
720 ppc_vmaddfp(gen->f, v3, v0, v3, v2); /* v3 = v0 * v3 + v2 */
721 break;
722 default:
723 assert(0);
724 }
725
726 /* store v3 */
727 emit_store(gen, v3, inst, chan, TRUE);
728 }
729
730 release_src_vecs(gen);
731 }
732
733
734 /**
735 * Vector comparisons, resulting in 1.0 or 0.0 values.
736 */
737 static void
738 emit_inequality(struct gen_context *gen, struct tgsi_full_instruction *inst)
739 {
740 uint chan;
741 int one_vec = gen_one_vec(gen);
742
743 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
744 /* fetch src operands */
745 int v0 = get_src_vec(gen, inst, 0, chan);
746 int v1 = get_src_vec(gen, inst, 1, chan);
747 int v2 = get_dst_vec(gen, inst, chan);
748 boolean complement = FALSE;
749
750 switch (inst->Instruction.Opcode) {
751 case TGSI_OPCODE_SNE:
752 complement = TRUE;
753 /* fall-through */
754 case TGSI_OPCODE_SEQ:
755 ppc_vcmpeqfpx(gen->f, v2, v0, v1); /* v2 = v0 == v1 ? ~0 : 0 */
756 break;
757
758 case TGSI_OPCODE_SGE:
759 complement = TRUE;
760 /* fall-through */
761 case TGSI_OPCODE_SLT:
762 ppc_vcmpgtfpx(gen->f, v2, v1, v0); /* v2 = v1 > v0 ? ~0 : 0 */
763 break;
764
765 case TGSI_OPCODE_SLE:
766 complement = TRUE;
767 /* fall-through */
768 case TGSI_OPCODE_SGT:
769 ppc_vcmpgtfpx(gen->f, v2, v0, v1); /* v2 = v0 > v1 ? ~0 : 0 */
770 break;
771 default:
772 assert(0);
773 }
774
775 /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
776
777 if (complement)
778 ppc_vandc(gen->f, v2, one_vec, v2); /* v2 = one_vec & ~v2 */
779 else
780 ppc_vand(gen->f, v2, one_vec, v2); /* v2 = one_vec & v2 */
781
782 /* store v2 */
783 emit_store(gen, v2, inst, chan, TRUE);
784 }
785
786 release_src_vecs(gen);
787 }
788
789
/**
 * Emit DP3/DP4/DPH dot products.  The scalar result is accumulated in
 * a single vector register (v2) via fused multiply-adds, then stored
 * to every enabled dest channel.  Note the accumulation order matters:
 * v2 is both a source and the destination of each vmaddfp.
 */
static void
emit_dotprod(struct gen_context *gen, struct tgsi_full_instruction *inst)
{
   int v0, v1, v2;
   uint chan_index;

   v2 = ppc_allocate_vec_register(gen->f);

   ppc_vzero(gen->f, v2);                  /* v2 = {0, 0, 0, 0} */

   /* all three opcodes accumulate the X, Y, Z products */
   v0 = get_src_vec(gen, inst, 0, CHAN_X); /* v0 = src0.XXXX */
   v1 = get_src_vec(gen, inst, 1, CHAN_X); /* v1 = src1.XXXX */
   ppc_vmaddfp(gen->f, v2, v0, v1, v2);    /* v2 = v0 * v1 + v2 */

   v0 = get_src_vec(gen, inst, 0, CHAN_Y); /* v0 = src0.YYYY */
   v1 = get_src_vec(gen, inst, 1, CHAN_Y); /* v1 = src1.YYYY */
   ppc_vmaddfp(gen->f, v2, v0, v1, v2);    /* v2 = v0 * v1 + v2 */

   v0 = get_src_vec(gen, inst, 0, CHAN_Z); /* v0 = src0.ZZZZ */
   v1 = get_src_vec(gen, inst, 1, CHAN_Z); /* v1 = src1.ZZZZ */
   ppc_vmaddfp(gen->f, v2, v0, v1, v2);    /* v2 = v0 * v1 + v2 */

   if (inst->Instruction.Opcode == TGSI_OPCODE_DP4) {
      /* DP4 also accumulates the W product */
      v0 = get_src_vec(gen, inst, 0, CHAN_W); /* v0 = src0.WWWW */
      v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
      ppc_vmaddfp(gen->f, v2, v0, v1, v2);    /* v2 = v0 * v1 + v2 */
   }
   else if (inst->Instruction.Opcode == TGSI_OPCODE_DPH) {
      /* homogeneous dot product: just add src1.w */
      v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
      ppc_vaddfp(gen->f, v2, v2, v1);         /* v2 = v2 + v1 */
   }

   FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
      /* v2 is shared between channels; released below, not per-store */
      emit_store(gen, v2, inst, chan_index, FALSE); /* store v2, free v2 later */
   }

   release_src_vecs(gen);

   ppc_release_vec_register(gen->f, v2);
}
830
831
/**
 * Approximation for vr = pow(va, vb), computed as exp2(log2(va) * vb).
 * Accuracy is limited by the AltiVec log/exp estimate instructions.
 */
static void
ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
{
   int log_vec = ppc_allocate_vec_register(f);
   int zero_vec = ppc_allocate_vec_register(f);

   ppc_vzero(f, zero_vec);

   ppc_vlogefp(f, log_vec, va);                     /* log = log2(va) */
   ppc_vmaddfp(f, log_vec, log_vec, vb, zero_vec);  /* log = log * vb + 0 */
   ppc_vexptefp(f, vr, log_vec);                    /* vr = 2^log */

   ppc_release_vec_register(f, log_vec);
   ppc_release_vec_register(f, zero_vec);
}
849
850
/**
 * Emit TGSI LIT (lighting coefficients):
 *   dst.x = 1.0
 *   dst.y = max(src.x, 0)
 *   dst.z = src.x > 0 ? pow(max(src.y,0), clamp(src.w,-128,128)) : 0
 *   dst.w = 1.0
 * Register lifetimes here are delicate: x_vec/y_vec/w_vec are clamped
 * in place and x_vec is reused by the Z computation.
 */
static void
emit_lit(struct gen_context *gen, struct tgsi_full_instruction *inst)
{
   int one_vec = gen_one_vec(gen);

   /* Compute X */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
      emit_store(gen, one_vec, inst, CHAN_X, FALSE);
   }

   /* Compute Y, Z */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
       IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
      int x_vec;
      int zero_vec = ppc_allocate_vec_register(gen->f);

      x_vec = get_src_vec(gen, inst, 0, CHAN_X);  /* x_vec = src[0].x */

      ppc_vzero(gen->f, zero_vec);                /* zero = {0,0,0,0} */
      ppc_vmaxfp(gen->f, x_vec, x_vec, zero_vec); /* x_vec = max(x_vec, 0) */

      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
         /* x_vec is reused below, so don't free it here */
         emit_store(gen, x_vec, inst, CHAN_Y, FALSE);
      }

      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
         int y_vec, w_vec;
         int z_vec = ppc_allocate_vec_register(gen->f);
         int pow_vec = ppc_allocate_vec_register(gen->f);
         int pos_vec = ppc_allocate_vec_register(gen->f);
         int p128_vec = ppc_allocate_vec_register(gen->f);
         int n128_vec = ppc_allocate_vec_register(gen->f);

         y_vec = get_src_vec(gen, inst, 0, CHAN_Y);  /* y_vec = src[0].y */
         ppc_vmaxfp(gen->f, y_vec, y_vec, zero_vec); /* y_vec = max(y_vec, 0) */

         w_vec = get_src_vec(gen, inst, 0, CHAN_W);  /* w_vec = src[0].w */

         /* clamp W to [-128, 128] (the specular exponent range) */
         load_constant_vec(gen, p128_vec, 128.0f);
         load_constant_vec(gen, n128_vec, -128.0f);
         ppc_vmaxfp(gen->f, w_vec, w_vec, n128_vec); /* w = max(w, -128) */
         ppc_vminfp(gen->f, w_vec, w_vec, p128_vec); /* w = min(w, 128) */

         /* if temp.x > 0
          *    z = pow(tmp.y, tmp.w)
          * else
          *    z = 0.0
          * (branchless: the all-ones/all-zeros compare mask selects)
          */
         ppc_vec_pow(gen->f, pow_vec, y_vec, w_vec);      /* pow = pow(y, w) */
         ppc_vcmpgtfpx(gen->f, pos_vec, x_vec, zero_vec); /* pos = x > 0 */
         ppc_vand(gen->f, z_vec, pow_vec, pos_vec);       /* z = pow & pos */

         emit_store(gen, z_vec, inst, CHAN_Z, FALSE);

         ppc_release_vec_register(gen->f, z_vec);
         ppc_release_vec_register(gen->f, pow_vec);
         ppc_release_vec_register(gen->f, pos_vec);
         ppc_release_vec_register(gen->f, p128_vec);
         ppc_release_vec_register(gen->f, n128_vec);
      }

      ppc_release_vec_register(gen->f, zero_vec);
   }

   /* Compute W */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
      emit_store(gen, one_vec, inst, CHAN_W, FALSE);
   }

   release_src_vecs(gen);
}
923
924
925 static void
926 emit_exp(struct gen_context *gen, struct tgsi_full_instruction *inst)
927 {
928 const int one_vec = gen_one_vec(gen);
929 int src_vec;
930
931 /* get src arg */
932 src_vec = get_src_vec(gen, inst, 0, CHAN_X);
933
934 /* Compute X = 2^floor(src) */
935 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
936 int dst_vec = get_dst_vec(gen, inst, CHAN_X);
937 int tmp_vec = ppc_allocate_vec_register(gen->f);
938 ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
939 ppc_vexptefp(gen->f, dst_vec, tmp_vec); /* dst = 2 ^ tmp */
940 emit_store(gen, dst_vec, inst, CHAN_X, TRUE);
941 ppc_release_vec_register(gen->f, tmp_vec);
942 }
943
944 /* Compute Y = src - floor(src) */
945 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
946 int dst_vec = get_dst_vec(gen, inst, CHAN_Y);
947 int tmp_vec = ppc_allocate_vec_register(gen->f);
948 ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
949 ppc_vsubfp(gen->f, dst_vec, src_vec, tmp_vec); /* dst = src - tmp */
950 emit_store(gen, dst_vec, inst, CHAN_Y, TRUE);
951 ppc_release_vec_register(gen->f, tmp_vec);
952 }
953
954 /* Compute Z = RoughApprox2ToX(src) */
955 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
956 int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
957 ppc_vexptefp(gen->f, dst_vec, src_vec); /* dst = 2 ^ src */
958 emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
959 }
960
961 /* Compute W = 1.0 */
962 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
963 emit_store(gen, one_vec, inst, CHAN_W, FALSE);
964 }
965
966 release_src_vecs(gen);
967 }
968
969
/**
 * Emit TGSI LOG:
 *   dst.x = floor(log2(|src.x|))
 *   dst.y = |src.x| / 2^floor(log2(|src.x|))
 *   dst.z = log2(|src.x|)  (rough approximation)
 *   dst.w = 1.0
 */
static void
emit_log(struct gen_context *gen, struct tgsi_full_instruction *inst)
{
   const int bit31_vec = gen_get_bit31_vec(gen);
   const int one_vec = gen_one_vec(gen);
   int src_vec, abs_vec;

   /* get src arg */
   src_vec = get_src_vec(gen, inst, 0, CHAN_X);

   /* compute abs(src) by clearing the sign bit */
   abs_vec = ppc_allocate_vec_register(gen->f);
   ppc_vandc(gen->f, abs_vec, src_vec, bit31_vec);      /* abs = src & ~bit31 */

   /* X and Y share the floor(log2(abs)) intermediate, so compute it
    * once when either is needed */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) &&
       IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {

      /* compute tmp = floor(log2(abs)) */
      int tmp_vec = ppc_allocate_vec_register(gen->f);
      ppc_vlogefp(gen->f, tmp_vec, abs_vec);            /* tmp = log2(abs) */
      ppc_vrfim(gen->f, tmp_vec, tmp_vec);              /* tmp = floor(tmp); */

      /* Compute X = tmp */
      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
         /* tmp_vec is reused for Y below, so don't free it here */
         emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
      }

      /* Compute Y = abs / 2^tmp  (via reciprocal, since there's no
       * vector divide) */
      if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
         const int zero_vec = ppc_allocate_vec_register(gen->f);
         ppc_vzero(gen->f, zero_vec);
         ppc_vexptefp(gen->f, tmp_vec, tmp_vec);        /* tmp = 2 ^ tmp */
         ppc_vrefp(gen->f, tmp_vec, tmp_vec);           /* tmp = 1 / tmp */
         /* tmp = abs * tmp + zero */
         ppc_vmaddfp(gen->f, tmp_vec, abs_vec, tmp_vec, zero_vec);
         emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
         ppc_release_vec_register(gen->f, zero_vec);
      }

      ppc_release_vec_register(gen->f, tmp_vec);
   }

   /* Compute Z = RoughApproxLog2(abs) */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
      int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
      ppc_vlogefp(gen->f, dst_vec, abs_vec);            /* dst = log2(abs) */
      emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
   }

   /* Compute W = 1.0 */
   if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
      emit_store(gen, one_vec, inst, CHAN_W, FALSE);
   }

   ppc_release_vec_register(gen->f, abs_vec);
   release_src_vecs(gen);
}
1027
1028
1029 static void
1030 emit_pow(struct gen_context *gen, struct tgsi_full_instruction *inst)
1031 {
1032 int s0_vec = get_src_vec(gen, inst, 0, CHAN_X);
1033 int s1_vec = get_src_vec(gen, inst, 1, CHAN_X);
1034 int pow_vec = ppc_allocate_vec_register(gen->f);
1035 int chan;
1036
1037 ppc_vec_pow(gen->f, pow_vec, s0_vec, s1_vec);
1038
1039 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
1040 emit_store(gen, pow_vec, inst, chan, FALSE);
1041 }
1042
1043 ppc_release_vec_register(gen->f, pow_vec);
1044
1045 release_src_vecs(gen);
1046 }
1047
1048
1049 static void
1050 emit_xpd(struct gen_context *gen, struct tgsi_full_instruction *inst)
1051 {
1052 int x0_vec, y0_vec, z0_vec;
1053 int x1_vec, y1_vec, z1_vec;
1054 int zero_vec, tmp_vec;
1055 int tmp2_vec;
1056
1057 zero_vec = ppc_allocate_vec_register(gen->f);
1058 ppc_vzero(gen->f, zero_vec);
1059
1060 tmp_vec = ppc_allocate_vec_register(gen->f);
1061 tmp2_vec = ppc_allocate_vec_register(gen->f);
1062
1063 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
1064 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
1065 x0_vec = get_src_vec(gen, inst, 0, CHAN_X);
1066 x1_vec = get_src_vec(gen, inst, 1, CHAN_X);
1067 }
1068 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
1069 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
1070 y0_vec = get_src_vec(gen, inst, 0, CHAN_Y);
1071 y1_vec = get_src_vec(gen, inst, 1, CHAN_Y);
1072 }
1073 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
1074 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
1075 z0_vec = get_src_vec(gen, inst, 0, CHAN_Z);
1076 z1_vec = get_src_vec(gen, inst, 1, CHAN_Z);
1077 }
1078
1079 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) {
1080 /* tmp = y0 * z1 */
1081 ppc_vmaddfp(gen->f, tmp_vec, y0_vec, z1_vec, zero_vec);
1082 /* tmp = tmp - z0 * y1*/
1083 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, z0_vec, y1_vec);
1084 emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
1085 }
1086 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) {
1087 /* tmp = z0 * x1 */
1088 ppc_vmaddfp(gen->f, tmp_vec, z0_vec, x1_vec, zero_vec);
1089 /* tmp = tmp - x0 * z1 */
1090 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, x0_vec, z1_vec);
1091 emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
1092 }
1093 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z) {
1094 /* tmp = x0 * y1 */
1095 ppc_vmaddfp(gen->f, tmp_vec, x0_vec, y1_vec, zero_vec);
1096 /* tmp = tmp - y0 * x1 */
1097 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, y0_vec, x1_vec);
1098 emit_store(gen, tmp_vec, inst, CHAN_Z, FALSE);
1099 }
1100 /* W is undefined */
1101
1102 ppc_release_vec_register(gen->f, tmp_vec);
1103 ppc_release_vec_register(gen->f, zero_vec);
1104 release_src_vecs(gen);
1105 }
1106
1107 static int
1108 emit_instruction(struct gen_context *gen,
1109 struct tgsi_full_instruction *inst)
1110 {
1111
1112 /* we don't handle saturation/clamping yet */
1113 if (inst->Instruction.Saturate != TGSI_SAT_NONE)
1114 return 0;
1115
1116 /* need to use extra temps to fix SOA dependencies : */
1117 if (tgsi_check_soa_dependencies(inst))
1118 return FALSE;
1119
1120 switch (inst->Instruction.Opcode) {
1121 case TGSI_OPCODE_MOV:
1122 case TGSI_OPCODE_SWZ:
1123 case TGSI_OPCODE_ABS:
1124 case TGSI_OPCODE_FLR:
1125 case TGSI_OPCODE_FRC:
1126 case TGSI_OPCODE_EX2:
1127 case TGSI_OPCODE_LG2:
1128 emit_unaryop(gen, inst);
1129 break;
1130 case TGSI_OPCODE_RSQ:
1131 case TGSI_OPCODE_RCP:
1132 emit_scalar_unaryop(gen, inst);
1133 break;
1134 case TGSI_OPCODE_ADD:
1135 case TGSI_OPCODE_SUB:
1136 case TGSI_OPCODE_MUL:
1137 case TGSI_OPCODE_MIN:
1138 case TGSI_OPCODE_MAX:
1139 emit_binop(gen, inst);
1140 break;
1141 case TGSI_OPCODE_SEQ:
1142 case TGSI_OPCODE_SNE:
1143 case TGSI_OPCODE_SLT:
1144 case TGSI_OPCODE_SGT:
1145 case TGSI_OPCODE_SLE:
1146 case TGSI_OPCODE_SGE:
1147 emit_inequality(gen, inst);
1148 break;
1149 case TGSI_OPCODE_MAD:
1150 case TGSI_OPCODE_LRP:
1151 emit_triop(gen, inst);
1152 break;
1153 case TGSI_OPCODE_DP3:
1154 case TGSI_OPCODE_DP4:
1155 case TGSI_OPCODE_DPH:
1156 emit_dotprod(gen, inst);
1157 break;
1158 case TGSI_OPCODE_LIT:
1159 emit_lit(gen, inst);
1160 break;
1161 case TGSI_OPCODE_LOG:
1162 emit_log(gen, inst);
1163 break;
1164 case TGSI_OPCODE_EXP:
1165 emit_exp(gen, inst);
1166 break;
1167 case TGSI_OPCODE_POW:
1168 emit_pow(gen, inst);
1169 break;
1170 case TGSI_OPCODE_XPD:
1171 emit_xpd(gen, inst);
1172 break;
1173 case TGSI_OPCODE_END:
1174 /* normal end */
1175 return 1;
1176 default:
1177 return 0;
1178 }
1179 return 1;
1180 }
1181
1182
1183 static void
1184 emit_declaration(
1185 struct ppc_function *func,
1186 struct tgsi_full_declaration *decl )
1187 {
1188 if( decl->Declaration.File == TGSI_FILE_INPUT ) {
1189 #if 0
1190 unsigned first, last, mask;
1191 unsigned i, j;
1192
1193 first = decl->DeclarationRange.First;
1194 last = decl->DeclarationRange.Last;
1195 mask = decl->Declaration.UsageMask;
1196
1197 for( i = first; i <= last; i++ ) {
1198 for( j = 0; j < NUM_CHANNELS; j++ ) {
1199 if( mask & (1 << j) ) {
1200 switch( decl->Declaration.Interpolate ) {
1201 case TGSI_INTERPOLATE_CONSTANT:
1202 emit_coef_a0( func, 0, i, j );
1203 emit_inputs( func, 0, i, j );
1204 break;
1205
1206 case TGSI_INTERPOLATE_LINEAR:
1207 emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
1208 emit_coef_dadx( func, 1, i, j );
1209 emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
1210 emit_coef_dady( func, 3, i, j );
1211 emit_mul( func, 0, 1 ); /* x * dadx */
1212 emit_coef_a0( func, 4, i, j );
1213 emit_mul( func, 2, 3 ); /* y * dady */
1214 emit_add( func, 0, 4 ); /* x * dadx + a0 */
1215 emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
1216 emit_inputs( func, 0, i, j );
1217 break;
1218
1219 case TGSI_INTERPOLATE_PERSPECTIVE:
1220 emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
1221 emit_coef_dadx( func, 1, i, j );
1222 emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
1223 emit_coef_dady( func, 3, i, j );
1224 emit_mul( func, 0, 1 ); /* x * dadx */
1225 emit_tempf( func, 4, 0, TGSI_SWIZZLE_W );
1226 emit_coef_a0( func, 5, i, j );
1227 emit_rcp( func, 4, 4 ); /* 1.0 / w */
1228 emit_mul( func, 2, 3 ); /* y * dady */
1229 emit_add( func, 0, 5 ); /* x * dadx + a0 */
1230 emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
1231 emit_mul( func, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
1232 emit_inputs( func, 0, i, j );
1233 break;
1234
1235 default:
1236 assert( 0 );
1237 break;
1238 }
1239 }
1240 }
1241 }
1242 #endif
1243 }
1244 }
1245
1246
1247
/**
 * Emit function prologue.  Currently a stub: no stack frame is set up,
 * matching the stub epilogue below which does not restore one.
 */
static void
emit_prologue(struct ppc_function *func)
{
   /* XXX set up stack frame */
}
1253
1254
/**
 * Emit function epilogue: an assembly comment marker followed by the
 * return instruction.  No stack frame is restored since emit_prologue
 * never sets one up.
 */
static void
emit_epilogue(struct ppc_function *func)
{
   /* NOTE(review): the -4 offset presumably anchors the comment relative
    * to the current instruction position — confirm against ppc_comment().
    */
   ppc_comment(func, -4, "Epilogue:");
   ppc_return(func);
   /* XXX restore prev stack frame */
#if 0
   debug_printf("PPC: Emitted %u instructions\n", func->num_inst);
#endif
}
1265
1266
1267
1268 /**
1269 * Translate a TGSI vertex/fragment shader to PPC code.
1270 *
1271 * \param tokens the TGSI input shader
1272 * \param func the output PPC code/function
1273 * \param immediates buffer to place immediates, later passed to PPC func
1274 * \return TRUE for success, FALSE if translation failed
1275 */
1276 boolean
1277 tgsi_emit_ppc(const struct tgsi_token *tokens,
1278 struct ppc_function *func,
1279 float (*immediates)[4],
1280 boolean do_swizzles )
1281 {
1282 static int use_ppc_asm = -1;
1283 struct tgsi_parse_context parse;
1284 /*boolean instruction_phase = FALSE;*/
1285 unsigned ok = 1;
1286 uint num_immediates = 0;
1287 struct gen_context gen;
1288 uint ic = 0;
1289
1290 if (use_ppc_asm < 0) {
1291 /* If GALLIUM_NOPPC is set, don't use PPC codegen */
1292 use_ppc_asm = !debug_get_bool_option("GALLIUM_NOPPC", FALSE);
1293 }
1294 if (!use_ppc_asm)
1295 return FALSE;
1296
1297 if (0) {
1298 debug_printf("\n********* TGSI->PPC ********\n");
1299 tgsi_dump(tokens, 0);
1300 }
1301
1302 util_init_math();
1303
1304 init_gen_context(&gen, func);
1305
1306 emit_prologue(func);
1307
1308 tgsi_parse_init( &parse, tokens );
1309
1310 while (!tgsi_parse_end_of_tokens(&parse) && ok) {
1311 tgsi_parse_token(&parse);
1312
1313 switch (parse.FullToken.Token.Type) {
1314 case TGSI_TOKEN_TYPE_DECLARATION:
1315 if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
1316 emit_declaration(func, &parse.FullToken.FullDeclaration );
1317 }
1318 break;
1319
1320 case TGSI_TOKEN_TYPE_INSTRUCTION:
1321 if (func->print) {
1322 _debug_printf("# ");
1323 ic++;
1324 tgsi_dump_instruction(&parse.FullToken.FullInstruction, ic);
1325 }
1326
1327 ok = emit_instruction(&gen, &parse.FullToken.FullInstruction);
1328
1329 if (!ok) {
1330 uint opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
1331 debug_printf("failed to translate tgsi opcode %d (%s) to PPC (%s)\n",
1332 opcode,
1333 tgsi_get_opcode_name(opcode),
1334 parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX ?
1335 "vertex shader" : "fragment shader");
1336 }
1337 break;
1338
1339 case TGSI_TOKEN_TYPE_IMMEDIATE:
1340 /* splat each immediate component into a float[4] vector for SoA */
1341 {
1342 const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
1343 uint i;
1344 assert(size <= 4);
1345 assert(num_immediates < TGSI_EXEC_NUM_IMMEDIATES);
1346 for (i = 0; i < size; i++) {
1347 immediates[num_immediates][i] =
1348 parse.FullToken.FullImmediate.u[i].Float;
1349 }
1350 num_immediates++;
1351 }
1352 break;
1353
1354 default:
1355 ok = 0;
1356 assert( 0 );
1357 }
1358 }
1359
1360 emit_epilogue(func);
1361
1362 tgsi_parse_free( &parse );
1363
1364 if (ppc_num_instructions(func) == 0) {
1365 /* ran out of memory for instructions */
1366 ok = FALSE;
1367 }
1368
1369 if (!ok)
1370 debug_printf("TGSI->PPC translation failed\n");
1371
1372 return ok;
1373 }
1374
1375 #endif /* PIPE_ARCH_PPC */