1 /**************************************************************************
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 * TGSI to PowerPC code generation.
32 #include "pipe/p_config.h"
34 #if defined(PIPE_ARCH_PPC)
36 #include "util/u_debug.h"
37 #include "pipe/p_shader_tokens.h"
38 #include "util/u_math.h"
39 #include "util/u_memory.h"
40 #include "util/u_sse.h"
41 #include "tgsi/tgsi_info.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi_dump.h"
45 #include "tgsi_exec.h"
47 #include "rtasm/rtasm_ppc.h"
51 * Since it's pretty much impossible to form PPC vector immediates, load
52 * them from memory here:
54 PIPE_ALIGN_VAR(16) const float
55 ppc_builtin_constants
[] = {
56 1.0f
, -128.0f
, 128.0, 0.0
/** Loop CHAN over all vector channels (0..NUM_CHANNELS-1). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/** Non-zero if channel CHAN is set in dest register 0's writemask. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST).Dst[0].Register.WriteMask & (1 << (CHAN)))

/** Execute the following statement only if CHAN is write-enabled. */
#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/** Loop CHAN over only the write-enabled channels of dest register 0. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/**
 * How many TGSI temps should be implemented with real PPC vector registers
 * rather than spilled to the memory-based temporary "file".
 */
#define MAX_PPC_TEMPS 3
87 * Context/state used during code gen.
91 struct ppc_function
*f
;
92 int inputs_reg
; /**< GP register pointing to input params */
93 int outputs_reg
; /**< GP register pointing to output params */
94 int temps_reg
; /**< GP register pointing to temporary "registers" */
95 int immed_reg
; /**< GP register pointing to immediates buffer */
96 int const_reg
; /**< GP register pointing to constants buffer */
97 int builtins_reg
; /**< GP register pointint to built-in constants */
99 int offset_reg
; /**< used to reduce redundant li instructions */
102 int one_vec
; /**< vector register with {1.0, 1.0, 1.0, 1.0} */
103 int bit31_vec
; /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */
106 * Map TGSI temps to PPC vector temps.
107 * We have 32 PPC vector regs. Use 16 of them for storing 4 TGSI temps.
108 * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
110 int temps_map
[MAX_PPC_TEMPS
][4];
113 * Cache of src registers.
114 * This is used to avoid redundant load instructions.
117 struct tgsi_full_src_register src
;
120 } regs
[12]; /* 3 src regs, 4 channels */
126 * Initialize code generation context.
129 init_gen_context(struct gen_context
*gen
, struct ppc_function
*func
)
133 memset(gen
, 0, sizeof(*gen
));
135 gen
->inputs_reg
= ppc_reserve_register(func
, 3); /* first function param */
136 gen
->outputs_reg
= ppc_reserve_register(func
, 4); /* second function param */
137 gen
->temps_reg
= ppc_reserve_register(func
, 5); /* ... */
138 gen
->immed_reg
= ppc_reserve_register(func
, 6);
139 gen
->const_reg
= ppc_reserve_register(func
, 7);
140 gen
->builtins_reg
= ppc_reserve_register(func
, 8);
143 gen
->offset_reg
= -1;
144 gen
->offset_value
= -9999999;
145 for (i
= 0; i
< MAX_PPC_TEMPS
; i
++) {
146 gen
->temps_map
[i
][0] = ppc_allocate_vec_register(gen
->f
);
147 gen
->temps_map
[i
][1] = ppc_allocate_vec_register(gen
->f
);
148 gen
->temps_map
[i
][2] = ppc_allocate_vec_register(gen
->f
);
149 gen
->temps_map
[i
][3] = ppc_allocate_vec_register(gen
->f
);
155 * Is the given TGSI register stored as a real PPC vector register?
158 is_ppc_vec_temporary(const struct tgsi_full_src_register
*reg
)
160 return (reg
->Register
.File
== TGSI_FILE_TEMPORARY
&&
161 reg
->Register
.Index
< MAX_PPC_TEMPS
);
166 * Is the given TGSI register stored as a real PPC vector register?
169 is_ppc_vec_temporary_dst(const struct tgsi_full_dst_register
*reg
)
171 return (reg
->Register
.File
== TGSI_FILE_TEMPORARY
&&
172 reg
->Register
.Index
< MAX_PPC_TEMPS
);
178 * All PPC vector load/store instructions form an effective address
179 * by adding the contents of two registers. For example:
180 * lvx v2,r8,r9 # v2 = memory[r8 + r9]
181 * stvx v2,r8,r9 # memory[r8 + r9] = v2;
182 * So our lvx/stvx instructions are typically preceded by an 'li' instruction
183 * to load r9 (above) with an immediate (an offset).
184 * This code emits that 'li' instruction, but only if the offset value is
185 * different than the previous 'li'.
186 * This optimization seems to save about 10% in the instruction count.
187 * Note that we need to unconditionally emit an 'li' inside basic blocks
188 * (such as inside loops).
191 emit_li_offset(struct gen_context
*gen
, int offset
)
193 if (gen
->offset_reg
<= 0) {
194 /* allocate a GP register for storing load/store offset */
195 gen
->offset_reg
= ppc_allocate_register(gen
->f
);
198 /* emit new 'li' if offset is changing */
199 if (gen
->offset_value
< 0 || gen
->offset_value
!= offset
) {
200 gen
->offset_value
= offset
;
201 ppc_li(gen
->f
, gen
->offset_reg
, offset
);
204 return gen
->offset_reg
;
209 * Forces subsequent emit_li_offset() calls to emit an 'li'.
210 * To be called at the top of basic blocks.
213 reset_li_offset(struct gen_context
*gen
)
215 gen
->offset_value
= -9999999;
221 * Load the given vector register with {value, value, value, value}.
222 * The value must be in the ppu_builtin_constants[] array.
223 * We wouldn't need this if there was a simple way to load PPC vector
224 * registers with immediate values!
227 load_constant_vec(struct gen_context
*gen
, int dst_vec
, float value
)
230 for (pos
= 0; pos
< Elements(ppc_builtin_constants
); pos
++) {
231 if (ppc_builtin_constants
[pos
] == value
) {
232 int offset
= pos
* 4;
233 int offset_reg
= emit_li_offset(gen
, offset
);
235 /* Load 4-byte word into vector register.
236 * The vector slot depends on the effective address we load from.
237 * We know that our builtins start at a 16-byte boundary so we
238 * know that 'swizzle' tells us which vector slot will have the
239 * loaded word. The other vector slots will be undefined.
241 ppc_lvewx(gen
->f
, dst_vec
, gen
->builtins_reg
, offset_reg
);
242 /* splat word[pos % 4] across the vector reg */
243 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, pos
% 4);
247 assert(0 && "Need to add new constant to ppc_builtin_constants array");
252 * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
255 gen_one_vec(struct gen_context
*gen
)
257 if (gen
->one_vec
< 0) {
258 gen
->one_vec
= ppc_allocate_vec_register(gen
->f
);
259 load_constant_vec(gen
, gen
->one_vec
, 1.0f
);
265 * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}.
268 gen_get_bit31_vec(struct gen_context
*gen
)
270 if (gen
->bit31_vec
< 0) {
271 gen
->bit31_vec
= ppc_allocate_vec_register(gen
->f
);
272 ppc_vspltisw(gen
->f
, gen
->bit31_vec
, -1);
273 ppc_vslw(gen
->f
, gen
->bit31_vec
, gen
->bit31_vec
, gen
->bit31_vec
);
275 return gen
->bit31_vec
;
280 * Register fetch. Return PPC vector register with result.
283 emit_fetch(struct gen_context
*gen
,
284 const struct tgsi_full_src_register
*reg
,
285 const unsigned chan_index
)
287 uint swizzle
= tgsi_util_get_full_src_register_swizzle(reg
, chan_index
);
295 switch (reg
->Register
.File
) {
296 case TGSI_FILE_INPUT
:
297 case TGSI_FILE_SYSTEM_VALUE
:
299 int offset
= (reg
->Register
.Index
* 4 + swizzle
) * 16;
300 int offset_reg
= emit_li_offset(gen
, offset
);
301 dst_vec
= ppc_allocate_vec_register(gen
->f
);
302 ppc_lvx(gen
->f
, dst_vec
, gen
->inputs_reg
, offset_reg
);
305 case TGSI_FILE_TEMPORARY
:
306 if (is_ppc_vec_temporary(reg
)) {
307 /* use PPC vec register */
308 dst_vec
= gen
->temps_map
[reg
->Register
.Index
][swizzle
];
311 /* use memory-based temp register "file" */
312 int offset
= (reg
->Register
.Index
* 4 + swizzle
) * 16;
313 int offset_reg
= emit_li_offset(gen
, offset
);
314 dst_vec
= ppc_allocate_vec_register(gen
->f
);
315 ppc_lvx(gen
->f
, dst_vec
, gen
->temps_reg
, offset_reg
);
318 case TGSI_FILE_IMMEDIATE
:
320 int offset
= (reg
->Register
.Index
* 4 + swizzle
) * 4;
321 int offset_reg
= emit_li_offset(gen
, offset
);
322 dst_vec
= ppc_allocate_vec_register(gen
->f
);
323 /* Load 4-byte word into vector register.
324 * The vector slot depends on the effective address we load from.
325 * We know that our immediates start at a 16-byte boundary so we
326 * know that 'swizzle' tells us which vector slot will have the
327 * loaded word. The other vector slots will be undefined.
329 ppc_lvewx(gen
->f
, dst_vec
, gen
->immed_reg
, offset_reg
);
330 /* splat word[swizzle] across the vector reg */
331 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, swizzle
);
334 case TGSI_FILE_CONSTANT
:
336 int offset
= (reg
->Register
.Index
* 4 + swizzle
) * 4;
337 int offset_reg
= emit_li_offset(gen
, offset
);
338 dst_vec
= ppc_allocate_vec_register(gen
->f
);
339 /* Load 4-byte word into vector register.
340 * The vector slot depends on the effective address we load from.
341 * We know that our constants start at a 16-byte boundary so we
342 * know that 'swizzle' tells us which vector slot will have the
343 * loaded word. The other vector slots will be undefined.
345 ppc_lvewx(gen
->f
, dst_vec
, gen
->const_reg
, offset_reg
);
346 /* splat word[swizzle] across the vector reg */
347 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, swizzle
);
358 assert(dst_vec
>= 0);
361 uint sign_op
= tgsi_util_get_full_src_register_sign_mode(reg
, chan_index
);
362 if (sign_op
!= TGSI_UTIL_SIGN_KEEP
) {
363 int bit31_vec
= gen_get_bit31_vec(gen
);
366 if (is_ppc_vec_temporary(reg
)) {
367 /* need to use a new temp */
368 dst_vec2
= ppc_allocate_vec_register(gen
->f
);
375 case TGSI_UTIL_SIGN_CLEAR
:
376 /* vec = vec & ~bit31 */
377 ppc_vandc(gen
->f
, dst_vec2
, dst_vec
, bit31_vec
);
379 case TGSI_UTIL_SIGN_SET
:
380 /* vec = vec | bit31 */
381 ppc_vor(gen
->f
, dst_vec2
, dst_vec
, bit31_vec
);
383 case TGSI_UTIL_SIGN_TOGGLE
:
384 /* vec = vec ^ bit31 */
385 ppc_vxor(gen
->f
, dst_vec2
, dst_vec
, bit31_vec
);
400 * Test if two TGSI src registers refer to the same memory location.
401 * We use this to avoid redundant register loads.
404 equal_src_locs(const struct tgsi_full_src_register
*a
, uint chan_a
,
405 const struct tgsi_full_src_register
*b
, uint chan_b
)
409 if (a
->Register
.File
!= b
->Register
.File
)
411 if (a
->Register
.Index
!= b
->Register
.Index
)
413 swz_a
= tgsi_util_get_full_src_register_swizzle(a
, chan_a
);
414 swz_b
= tgsi_util_get_full_src_register_swizzle(b
, chan_b
);
417 sign_a
= tgsi_util_get_full_src_register_sign_mode(a
, chan_a
);
418 sign_b
= tgsi_util_get_full_src_register_sign_mode(b
, chan_b
);
419 if (sign_a
!= sign_b
)
426 * Given a TGSI src register and channel index, return the PPC vector
427 * register containing the value. We use a cache to prevent re-loading
428 * the same register multiple times.
429 * \return index of PPC vector register with the desired src operand
432 get_src_vec(struct gen_context
*gen
,
433 struct tgsi_full_instruction
*inst
, int src_reg
, uint chan
)
435 const const struct tgsi_full_src_register
*src
=
440 /* check the cache */
441 for (i
= 0; i
< gen
->num_regs
; i
++) {
442 if (equal_src_locs(&gen
->regs
[i
].src
, gen
->regs
[i
].chan
, src
, chan
)) {
444 assert(gen
->regs
[i
].vec
>= 0);
445 return gen
->regs
[i
].vec
;
449 /* cache miss: allocate new vec reg and emit fetch/load code */
450 vec
= emit_fetch(gen
, src
, chan
);
451 gen
->regs
[gen
->num_regs
].src
= *src
;
452 gen
->regs
[gen
->num_regs
].chan
= chan
;
453 gen
->regs
[gen
->num_regs
].vec
= vec
;
456 assert(gen
->num_regs
<= Elements(gen
->regs
));
465 * Clear the src operand cache. To be called at the end of each emit function.
468 release_src_vecs(struct gen_context
*gen
)
471 for (i
= 0; i
< gen
->num_regs
; i
++) {
472 const const struct tgsi_full_src_register src
= gen
->regs
[i
].src
;
473 if (!is_ppc_vec_temporary(&src
)) {
474 ppc_release_vec_register(gen
->f
, gen
->regs
[i
].vec
);
483 get_dst_vec(struct gen_context
*gen
,
484 const struct tgsi_full_instruction
*inst
,
487 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[0];
489 if (is_ppc_vec_temporary_dst(reg
)) {
490 int vec
= gen
->temps_map
[reg
->Register
.Index
][chan_index
];
494 return ppc_allocate_vec_register(gen
->f
);
500 * Register store. Store 'src_vec' at location indicated by 'reg'.
501 * \param free_vec Should the src_vec be released when done?
504 emit_store(struct gen_context
*gen
,
506 const struct tgsi_full_instruction
*inst
,
510 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[0];
512 switch (reg
->Register
.File
) {
513 case TGSI_FILE_OUTPUT
:
515 int offset
= (reg
->Register
.Index
* 4 + chan_index
) * 16;
516 int offset_reg
= emit_li_offset(gen
, offset
);
517 ppc_stvx(gen
->f
, src_vec
, gen
->outputs_reg
, offset_reg
);
520 case TGSI_FILE_TEMPORARY
:
521 if (is_ppc_vec_temporary_dst(reg
)) {
523 int dst_vec
= gen
->temps_map
[reg
->Register
.Index
][chan_index
];
524 if (dst_vec
!= src_vec
)
525 ppc_vmove(gen
->f
, dst_vec
, src_vec
);
530 int offset
= (reg
->Register
.Index
* 4 + chan_index
) * 16;
531 int offset_reg
= emit_li_offset(gen
, offset
);
532 ppc_stvx(gen
->f
, src_vec
, gen
->temps_reg
, offset_reg
);
536 case TGSI_FILE_ADDRESS
:
549 switch( inst
->Instruction
.Saturate
) {
553 case TGSI_SAT_ZERO_ONE
:
557 case TGSI_SAT_MINUS_PLUS_ONE
:
564 ppc_release_vec_register(gen
->f
, src_vec
);
569 emit_scalar_unaryop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
574 v0
= get_src_vec(gen
, inst
, 0, CHAN_X
);
575 v1
= ppc_allocate_vec_register(gen
->f
);
577 switch (inst
->Instruction
.Opcode
) {
578 case TGSI_OPCODE_RSQ
:
579 /* v1 = 1.0 / sqrt(v0) */
580 ppc_vrsqrtefp(gen
->f
, v1
, v0
);
582 case TGSI_OPCODE_RCP
:
584 ppc_vrefp(gen
->f
, v1
, v0
);
590 FOR_EACH_DST0_ENABLED_CHANNEL( *inst
, chan_index
) {
591 emit_store(gen
, v1
, inst
, chan_index
, FALSE
);
594 release_src_vecs(gen
);
595 ppc_release_vec_register(gen
->f
, v1
);
600 emit_unaryop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
604 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan_index
) {
605 int v0
= get_src_vec(gen
, inst
, 0, chan_index
); /* v0 = srcreg[0] */
606 int v1
= get_dst_vec(gen
, inst
, chan_index
);
607 switch (inst
->Instruction
.Opcode
) {
608 case TGSI_OPCODE_ABS
:
609 /* turn off the most significant bit of each vector float word */
611 int bit31_vec
= gen_get_bit31_vec(gen
);
612 ppc_vandc(gen
->f
, v1
, v0
, bit31_vec
); /* v1 = v0 & ~bit31 */
615 case TGSI_OPCODE_FLR
:
616 ppc_vrfim(gen
->f
, v1
, v0
); /* v1 = floor(v0) */
618 case TGSI_OPCODE_FRC
:
619 ppc_vrfim(gen
->f
, v1
, v0
); /* tmp = floor(v0) */
620 ppc_vsubfp(gen
->f
, v1
, v0
, v1
); /* v1 = v0 - v1 */
622 case TGSI_OPCODE_EX2
:
623 ppc_vexptefp(gen
->f
, v1
, v0
); /* v1 = 2^v0 */
625 case TGSI_OPCODE_LG2
:
626 /* XXX this may be broken! */
627 ppc_vlogefp(gen
->f
, v1
, v0
); /* v1 = log2(v0) */
629 case TGSI_OPCODE_MOV
:
631 ppc_vmove(gen
->f
, v1
, v0
);
636 emit_store(gen
, v1
, inst
, chan_index
, TRUE
); /* store v0 */
639 release_src_vecs(gen
);
644 emit_binop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
649 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_MUL
) {
650 zero_vec
= ppc_allocate_vec_register(gen
->f
);
651 ppc_vzero(gen
->f
, zero_vec
);
654 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
655 /* fetch src operands */
656 int v0
= get_src_vec(gen
, inst
, 0, chan
);
657 int v1
= get_src_vec(gen
, inst
, 1, chan
);
658 int v2
= get_dst_vec(gen
, inst
, chan
);
661 switch (inst
->Instruction
.Opcode
) {
662 case TGSI_OPCODE_ADD
:
663 ppc_vaddfp(gen
->f
, v2
, v0
, v1
);
665 case TGSI_OPCODE_SUB
:
666 ppc_vsubfp(gen
->f
, v2
, v0
, v1
);
668 case TGSI_OPCODE_MUL
:
669 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, zero_vec
);
671 case TGSI_OPCODE_MIN
:
672 ppc_vminfp(gen
->f
, v2
, v0
, v1
);
674 case TGSI_OPCODE_MAX
:
675 ppc_vmaxfp(gen
->f
, v2
, v0
, v1
);
682 emit_store(gen
, v2
, inst
, chan
, TRUE
);
685 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_MUL
)
686 ppc_release_vec_register(gen
->f
, zero_vec
);
688 release_src_vecs(gen
);
693 emit_triop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
697 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
698 /* fetch src operands */
699 int v0
= get_src_vec(gen
, inst
, 0, chan
);
700 int v1
= get_src_vec(gen
, inst
, 1, chan
);
701 int v2
= get_src_vec(gen
, inst
, 2, chan
);
702 int v3
= get_dst_vec(gen
, inst
, chan
);
705 switch (inst
->Instruction
.Opcode
) {
706 case TGSI_OPCODE_MAD
:
707 ppc_vmaddfp(gen
->f
, v3
, v0
, v1
, v2
); /* v3 = v0 * v1 + v2 */
709 case TGSI_OPCODE_LRP
:
710 ppc_vsubfp(gen
->f
, v3
, v1
, v2
); /* v3 = v1 - v2 */
711 ppc_vmaddfp(gen
->f
, v3
, v0
, v3
, v2
); /* v3 = v0 * v3 + v2 */
718 emit_store(gen
, v3
, inst
, chan
, TRUE
);
721 release_src_vecs(gen
);
726 * Vector comparisons, resulting in 1.0 or 0.0 values.
729 emit_inequality(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
732 int one_vec
= gen_one_vec(gen
);
734 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
735 /* fetch src operands */
736 int v0
= get_src_vec(gen
, inst
, 0, chan
);
737 int v1
= get_src_vec(gen
, inst
, 1, chan
);
738 int v2
= get_dst_vec(gen
, inst
, chan
);
739 boolean complement
= FALSE
;
741 switch (inst
->Instruction
.Opcode
) {
742 case TGSI_OPCODE_SNE
:
745 case TGSI_OPCODE_SEQ
:
746 ppc_vcmpeqfpx(gen
->f
, v2
, v0
, v1
); /* v2 = v0 == v1 ? ~0 : 0 */
749 case TGSI_OPCODE_SGE
:
752 case TGSI_OPCODE_SLT
:
753 ppc_vcmpgtfpx(gen
->f
, v2
, v1
, v0
); /* v2 = v1 > v0 ? ~0 : 0 */
756 case TGSI_OPCODE_SLE
:
759 case TGSI_OPCODE_SGT
:
760 ppc_vcmpgtfpx(gen
->f
, v2
, v0
, v1
); /* v2 = v0 > v1 ? ~0 : 0 */
766 /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
769 ppc_vandc(gen
->f
, v2
, one_vec
, v2
); /* v2 = one_vec & ~v2 */
771 ppc_vand(gen
->f
, v2
, one_vec
, v2
); /* v2 = one_vec & v2 */
774 emit_store(gen
, v2
, inst
, chan
, TRUE
);
777 release_src_vecs(gen
);
782 emit_dotprod(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
787 v2
= ppc_allocate_vec_register(gen
->f
);
789 ppc_vzero(gen
->f
, v2
); /* v2 = {0, 0, 0, 0} */
791 v0
= get_src_vec(gen
, inst
, 0, CHAN_X
); /* v0 = src0.XXXX */
792 v1
= get_src_vec(gen
, inst
, 1, CHAN_X
); /* v1 = src1.XXXX */
793 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
795 v0
= get_src_vec(gen
, inst
, 0, CHAN_Y
); /* v0 = src0.YYYY */
796 v1
= get_src_vec(gen
, inst
, 1, CHAN_Y
); /* v1 = src1.YYYY */
797 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
799 v0
= get_src_vec(gen
, inst
, 0, CHAN_Z
); /* v0 = src0.ZZZZ */
800 v1
= get_src_vec(gen
, inst
, 1, CHAN_Z
); /* v1 = src1.ZZZZ */
801 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
803 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_DP4
) {
804 v0
= get_src_vec(gen
, inst
, 0, CHAN_W
); /* v0 = src0.WWWW */
805 v1
= get_src_vec(gen
, inst
, 1, CHAN_W
); /* v1 = src1.WWWW */
806 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
808 else if (inst
->Instruction
.Opcode
== TGSI_OPCODE_DPH
) {
809 v1
= get_src_vec(gen
, inst
, 1, CHAN_W
); /* v1 = src1.WWWW */
810 ppc_vaddfp(gen
->f
, v2
, v2
, v1
); /* v2 = v2 + v1 */
813 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan_index
) {
814 emit_store(gen
, v2
, inst
, chan_index
, FALSE
); /* store v2, free v2 later */
817 release_src_vecs(gen
);
819 ppc_release_vec_register(gen
->f
, v2
);
/** Approximation for vr = pow(va, vb) */
static void
ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
{
   /* pow(a,b) ~= exp2(log2(a) * b) */
   int t_vec = ppc_allocate_vec_register(f);
   int zero_vec = ppc_allocate_vec_register(f);

   ppc_vzero(f, zero_vec);

   ppc_vlogefp(f, t_vec, va);                   /* t = log2(va) */
   ppc_vmaddfp(f, t_vec, t_vec, vb, zero_vec);  /* t = t * vb + zero */
   ppc_vexptefp(f, vr, t_vec);                  /* vr = 2^t */

   ppc_release_vec_register(f, t_vec);
   ppc_release_vec_register(f, zero_vec);
}
843 emit_lit(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
845 int one_vec
= gen_one_vec(gen
);
848 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
)) {
849 emit_store(gen
, one_vec
, inst
, CHAN_X
, FALSE
);
853 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
) ||
854 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
856 int zero_vec
= ppc_allocate_vec_register(gen
->f
);
858 x_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
); /* x_vec = src[0].x */
860 ppc_vzero(gen
->f
, zero_vec
); /* zero = {0,0,0,0} */
861 ppc_vmaxfp(gen
->f
, x_vec
, x_vec
, zero_vec
); /* x_vec = max(x_vec, 0) */
863 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
864 emit_store(gen
, x_vec
, inst
, CHAN_Y
, FALSE
);
867 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
869 int z_vec
= ppc_allocate_vec_register(gen
->f
);
870 int pow_vec
= ppc_allocate_vec_register(gen
->f
);
871 int pos_vec
= ppc_allocate_vec_register(gen
->f
);
872 int p128_vec
= ppc_allocate_vec_register(gen
->f
);
873 int n128_vec
= ppc_allocate_vec_register(gen
->f
);
875 y_vec
= get_src_vec(gen
, inst
, 0, CHAN_Y
); /* y_vec = src[0].y */
876 ppc_vmaxfp(gen
->f
, y_vec
, y_vec
, zero_vec
); /* y_vec = max(y_vec, 0) */
878 w_vec
= get_src_vec(gen
, inst
, 0, CHAN_W
); /* w_vec = src[0].w */
880 /* clamp W to [-128, 128] */
881 load_constant_vec(gen
, p128_vec
, 128.0f
);
882 load_constant_vec(gen
, n128_vec
, -128.0f
);
883 ppc_vmaxfp(gen
->f
, w_vec
, w_vec
, n128_vec
); /* w = max(w, -128) */
884 ppc_vminfp(gen
->f
, w_vec
, w_vec
, p128_vec
); /* w = min(w, 128) */
887 * z = pow(tmp.y, tmp.w)
891 ppc_vec_pow(gen
->f
, pow_vec
, y_vec
, w_vec
); /* pow = pow(y, w) */
892 ppc_vcmpgtfpx(gen
->f
, pos_vec
, x_vec
, zero_vec
); /* pos = x > 0 */
893 ppc_vand(gen
->f
, z_vec
, pow_vec
, pos_vec
); /* z = pow & pos */
895 emit_store(gen
, z_vec
, inst
, CHAN_Z
, FALSE
);
897 ppc_release_vec_register(gen
->f
, z_vec
);
898 ppc_release_vec_register(gen
->f
, pow_vec
);
899 ppc_release_vec_register(gen
->f
, pos_vec
);
900 ppc_release_vec_register(gen
->f
, p128_vec
);
901 ppc_release_vec_register(gen
->f
, n128_vec
);
904 ppc_release_vec_register(gen
->f
, zero_vec
);
908 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_W
)) {
909 emit_store(gen
, one_vec
, inst
, CHAN_W
, FALSE
);
912 release_src_vecs(gen
);
917 emit_exp(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
919 const int one_vec
= gen_one_vec(gen
);
923 src_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
);
925 /* Compute X = 2^floor(src) */
926 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
)) {
927 int dst_vec
= get_dst_vec(gen
, inst
, CHAN_X
);
928 int tmp_vec
= ppc_allocate_vec_register(gen
->f
);
929 ppc_vrfim(gen
->f
, tmp_vec
, src_vec
); /* tmp = floor(src); */
930 ppc_vexptefp(gen
->f
, dst_vec
, tmp_vec
); /* dst = 2 ^ tmp */
931 emit_store(gen
, dst_vec
, inst
, CHAN_X
, TRUE
);
932 ppc_release_vec_register(gen
->f
, tmp_vec
);
935 /* Compute Y = src - floor(src) */
936 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
937 int dst_vec
= get_dst_vec(gen
, inst
, CHAN_Y
);
938 int tmp_vec
= ppc_allocate_vec_register(gen
->f
);
939 ppc_vrfim(gen
->f
, tmp_vec
, src_vec
); /* tmp = floor(src); */
940 ppc_vsubfp(gen
->f
, dst_vec
, src_vec
, tmp_vec
); /* dst = src - tmp */
941 emit_store(gen
, dst_vec
, inst
, CHAN_Y
, TRUE
);
942 ppc_release_vec_register(gen
->f
, tmp_vec
);
945 /* Compute Z = RoughApprox2ToX(src) */
946 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
947 int dst_vec
= get_dst_vec(gen
, inst
, CHAN_Z
);
948 ppc_vexptefp(gen
->f
, dst_vec
, src_vec
); /* dst = 2 ^ src */
949 emit_store(gen
, dst_vec
, inst
, CHAN_Z
, TRUE
);
952 /* Compute W = 1.0 */
953 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_W
)) {
954 emit_store(gen
, one_vec
, inst
, CHAN_W
, FALSE
);
957 release_src_vecs(gen
);
962 emit_log(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
964 const int bit31_vec
= gen_get_bit31_vec(gen
);
965 const int one_vec
= gen_one_vec(gen
);
966 int src_vec
, abs_vec
;
969 src_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
);
971 /* compute abs(src) */
972 abs_vec
= ppc_allocate_vec_register(gen
->f
);
973 ppc_vandc(gen
->f
, abs_vec
, src_vec
, bit31_vec
); /* abs = src & ~bit31 */
975 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
) &&
976 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
978 /* compute tmp = floor(log2(abs)) */
979 int tmp_vec
= ppc_allocate_vec_register(gen
->f
);
980 ppc_vlogefp(gen
->f
, tmp_vec
, abs_vec
); /* tmp = log2(abs) */
981 ppc_vrfim(gen
->f
, tmp_vec
, tmp_vec
); /* tmp = floor(tmp); */
983 /* Compute X = tmp */
984 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
)) {
985 emit_store(gen
, tmp_vec
, inst
, CHAN_X
, FALSE
);
988 /* Compute Y = abs / 2^tmp */
989 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
990 const int zero_vec
= ppc_allocate_vec_register(gen
->f
);
991 ppc_vzero(gen
->f
, zero_vec
);
992 ppc_vexptefp(gen
->f
, tmp_vec
, tmp_vec
); /* tmp = 2 ^ tmp */
993 ppc_vrefp(gen
->f
, tmp_vec
, tmp_vec
); /* tmp = 1 / tmp */
994 /* tmp = abs * tmp + zero */
995 ppc_vmaddfp(gen
->f
, tmp_vec
, abs_vec
, tmp_vec
, zero_vec
);
996 emit_store(gen
, tmp_vec
, inst
, CHAN_Y
, FALSE
);
997 ppc_release_vec_register(gen
->f
, zero_vec
);
1000 ppc_release_vec_register(gen
->f
, tmp_vec
);
1003 /* Compute Z = RoughApproxLog2(abs) */
1004 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
1005 int dst_vec
= get_dst_vec(gen
, inst
, CHAN_Z
);
1006 ppc_vlogefp(gen
->f
, dst_vec
, abs_vec
); /* dst = log2(abs) */
1007 emit_store(gen
, dst_vec
, inst
, CHAN_Z
, TRUE
);
1010 /* Compute W = 1.0 */
1011 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_W
)) {
1012 emit_store(gen
, one_vec
, inst
, CHAN_W
, FALSE
);
1015 ppc_release_vec_register(gen
->f
, abs_vec
);
1016 release_src_vecs(gen
);
1021 emit_pow(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
1023 int s0_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
);
1024 int s1_vec
= get_src_vec(gen
, inst
, 1, CHAN_X
);
1025 int pow_vec
= ppc_allocate_vec_register(gen
->f
);
1028 ppc_vec_pow(gen
->f
, pow_vec
, s0_vec
, s1_vec
);
1030 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
1031 emit_store(gen
, pow_vec
, inst
, chan
, FALSE
);
1034 ppc_release_vec_register(gen
->f
, pow_vec
);
1036 release_src_vecs(gen
);
1041 emit_xpd(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
1043 int x0_vec
, y0_vec
, z0_vec
;
1044 int x1_vec
, y1_vec
, z1_vec
;
1045 int zero_vec
, tmp_vec
;
1048 zero_vec
= ppc_allocate_vec_register(gen
->f
);
1049 ppc_vzero(gen
->f
, zero_vec
);
1051 tmp_vec
= ppc_allocate_vec_register(gen
->f
);
1052 tmp2_vec
= ppc_allocate_vec_register(gen
->f
);
1054 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
) ||
1055 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
1056 x0_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
);
1057 x1_vec
= get_src_vec(gen
, inst
, 1, CHAN_X
);
1059 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
) ||
1060 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
1061 y0_vec
= get_src_vec(gen
, inst
, 0, CHAN_Y
);
1062 y1_vec
= get_src_vec(gen
, inst
, 1, CHAN_Y
);
1064 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
) ||
1065 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
1066 z0_vec
= get_src_vec(gen
, inst
, 0, CHAN_Z
);
1067 z1_vec
= get_src_vec(gen
, inst
, 1, CHAN_Z
);
1070 IF_IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
) {
1072 ppc_vmaddfp(gen
->f
, tmp_vec
, y0_vec
, z1_vec
, zero_vec
);
1073 /* tmp = tmp - z0 * y1*/
1074 ppc_vnmsubfp(gen
->f
, tmp_vec
, tmp_vec
, z0_vec
, y1_vec
);
1075 emit_store(gen
, tmp_vec
, inst
, CHAN_X
, FALSE
);
1077 IF_IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
) {
1079 ppc_vmaddfp(gen
->f
, tmp_vec
, z0_vec
, x1_vec
, zero_vec
);
1080 /* tmp = tmp - x0 * z1 */
1081 ppc_vnmsubfp(gen
->f
, tmp_vec
, tmp_vec
, x0_vec
, z1_vec
);
1082 emit_store(gen
, tmp_vec
, inst
, CHAN_Y
, FALSE
);
1084 IF_IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
) {
1086 ppc_vmaddfp(gen
->f
, tmp_vec
, x0_vec
, y1_vec
, zero_vec
);
1087 /* tmp = tmp - y0 * x1 */
1088 ppc_vnmsubfp(gen
->f
, tmp_vec
, tmp_vec
, y0_vec
, x1_vec
);
1089 emit_store(gen
, tmp_vec
, inst
, CHAN_Z
, FALSE
);
1091 /* W is undefined */
1093 ppc_release_vec_register(gen
->f
, tmp_vec
);
1094 ppc_release_vec_register(gen
->f
, zero_vec
);
1095 release_src_vecs(gen
);
1099 emit_instruction(struct gen_context
*gen
,
1100 struct tgsi_full_instruction
*inst
)
1103 /* we don't handle saturation/clamping yet */
1104 if (inst
->Instruction
.Saturate
!= TGSI_SAT_NONE
)
1107 /* need to use extra temps to fix SOA dependencies : */
1108 if (tgsi_check_soa_dependencies(inst
))
1111 switch (inst
->Instruction
.Opcode
) {
1112 case TGSI_OPCODE_MOV
:
1113 case TGSI_OPCODE_ABS
:
1114 case TGSI_OPCODE_FLR
:
1115 case TGSI_OPCODE_FRC
:
1116 case TGSI_OPCODE_EX2
:
1117 case TGSI_OPCODE_LG2
:
1118 emit_unaryop(gen
, inst
);
1120 case TGSI_OPCODE_RSQ
:
1121 case TGSI_OPCODE_RCP
:
1122 emit_scalar_unaryop(gen
, inst
);
1124 case TGSI_OPCODE_ADD
:
1125 case TGSI_OPCODE_SUB
:
1126 case TGSI_OPCODE_MUL
:
1127 case TGSI_OPCODE_MIN
:
1128 case TGSI_OPCODE_MAX
:
1129 emit_binop(gen
, inst
);
1131 case TGSI_OPCODE_SEQ
:
1132 case TGSI_OPCODE_SNE
:
1133 case TGSI_OPCODE_SLT
:
1134 case TGSI_OPCODE_SGT
:
1135 case TGSI_OPCODE_SLE
:
1136 case TGSI_OPCODE_SGE
:
1137 emit_inequality(gen
, inst
);
1139 case TGSI_OPCODE_MAD
:
1140 case TGSI_OPCODE_LRP
:
1141 emit_triop(gen
, inst
);
1143 case TGSI_OPCODE_DP3
:
1144 case TGSI_OPCODE_DP4
:
1145 case TGSI_OPCODE_DPH
:
1146 emit_dotprod(gen
, inst
);
1148 case TGSI_OPCODE_LIT
:
1149 emit_lit(gen
, inst
);
1151 case TGSI_OPCODE_LOG
:
1152 emit_log(gen
, inst
);
1154 case TGSI_OPCODE_EXP
:
1155 emit_exp(gen
, inst
);
1157 case TGSI_OPCODE_POW
:
1158 emit_pow(gen
, inst
);
1160 case TGSI_OPCODE_XPD
:
1161 emit_xpd(gen
, inst
);
1163 case TGSI_OPCODE_END
:
1175 struct ppc_function
*func
,
1176 struct tgsi_full_declaration
*decl
)
1178 if( decl
->Declaration
.File
== TGSI_FILE_INPUT
||
1179 decl
->Declaration
.File
== TGSI_FILE_SYSTEM_VALUE
) {
1181 unsigned first
, last
, mask
;
1184 first
= decl
->Range
.First
;
1185 last
= decl
->Range
.Last
;
1186 mask
= decl
->Declaration
.UsageMask
;
1188 for( i
= first
; i
<= last
; i
++ ) {
1189 for( j
= 0; j
< NUM_CHANNELS
; j
++ ) {
1190 if( mask
& (1 << j
) ) {
1191 switch( decl
->Declaration
.Interpolate
) {
1192 case TGSI_INTERPOLATE_CONSTANT
:
1193 emit_coef_a0( func
, 0, i
, j
);
1194 emit_inputs( func
, 0, i
, j
);
1197 case TGSI_INTERPOLATE_LINEAR
:
1198 emit_tempf( func
, 0, 0, TGSI_SWIZZLE_X
);
1199 emit_coef_dadx( func
, 1, i
, j
);
1200 emit_tempf( func
, 2, 0, TGSI_SWIZZLE_Y
);
1201 emit_coef_dady( func
, 3, i
, j
);
1202 emit_mul( func
, 0, 1 ); /* x * dadx */
1203 emit_coef_a0( func
, 4, i
, j
);
1204 emit_mul( func
, 2, 3 ); /* y * dady */
1205 emit_add( func
, 0, 4 ); /* x * dadx + a0 */
1206 emit_add( func
, 0, 2 ); /* x * dadx + y * dady + a0 */
1207 emit_inputs( func
, 0, i
, j
);
1210 case TGSI_INTERPOLATE_PERSPECTIVE
:
1211 emit_tempf( func
, 0, 0, TGSI_SWIZZLE_X
);
1212 emit_coef_dadx( func
, 1, i
, j
);
1213 emit_tempf( func
, 2, 0, TGSI_SWIZZLE_Y
);
1214 emit_coef_dady( func
, 3, i
, j
);
1215 emit_mul( func
, 0, 1 ); /* x * dadx */
1216 emit_tempf( func
, 4, 0, TGSI_SWIZZLE_W
);
1217 emit_coef_a0( func
, 5, i
, j
);
1218 emit_rcp( func
, 4, 4 ); /* 1.0 / w */
1219 emit_mul( func
, 2, 3 ); /* y * dady */
1220 emit_add( func
, 0, 5 ); /* x * dadx + a0 */
1221 emit_add( func
, 0, 2 ); /* x * dadx + y * dady + a0 */
1222 emit_mul( func
, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
1223 emit_inputs( func
, 0, i
, j
);
/** Emit function prologue (currently a placeholder). */
static void
emit_prologue(struct ppc_function *func)
{
   /* XXX set up stack frame */
}
/** Emit function epilogue: comment marker and return instruction. */
static void
emit_epilogue(struct ppc_function *func)
{
   ppc_comment(func, -4, "Epilogue:");
   /* NOTE(review): a ppc_return(func) is expected here per the rtasm_ppc
    * emitter API -- confirm against upstream; the extracted source lost
    * the line between the comment and the stack-frame note.
    */
   ppc_return(func);
   /* XXX restore prev stack frame */
#if 0
   debug_printf("PPC: Emitted %u instructions\n", func->num_inst);
#endif
}
1260 * Translate a TGSI vertex/fragment shader to PPC code.
1262 * \param tokens the TGSI input shader
1263 * \param func the output PPC code/function
1264 * \param immediates buffer to place immediates, later passed to PPC func
1265 * \return TRUE for success, FALSE if translation failed
1268 tgsi_emit_ppc(const struct tgsi_token
*tokens
,
1269 struct ppc_function
*func
,
1270 float (*immediates
)[4],
1271 boolean do_swizzles
)
1273 static int use_ppc_asm
= -1;
1274 struct tgsi_parse_context parse
;
1275 /*boolean instruction_phase = FALSE;*/
1277 uint num_immediates
= 0;
1278 struct gen_context gen
;
1281 if (use_ppc_asm
< 0) {
1282 /* If GALLIUM_NOPPC is set, don't use PPC codegen */
1283 use_ppc_asm
= !debug_get_bool_option("GALLIUM_NOPPC", FALSE
);
1289 debug_printf("\n********* TGSI->PPC ********\n");
1290 tgsi_dump(tokens
, 0);
1295 init_gen_context(&gen
, func
);
1297 emit_prologue(func
);
1299 tgsi_parse_init( &parse
, tokens
);
1301 while (!tgsi_parse_end_of_tokens(&parse
) && ok
) {
1302 tgsi_parse_token(&parse
);
1304 switch (parse
.FullToken
.Token
.Type
) {
1305 case TGSI_TOKEN_TYPE_DECLARATION
:
1306 if (parse
.FullHeader
.Processor
.Processor
== TGSI_PROCESSOR_FRAGMENT
) {
1307 emit_declaration(func
, &parse
.FullToken
.FullDeclaration
);
1311 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1313 _debug_printf("# ");
1315 tgsi_dump_instruction(&parse
.FullToken
.FullInstruction
, ic
);
1318 ok
= emit_instruction(&gen
, &parse
.FullToken
.FullInstruction
);
1321 uint opcode
= parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
1322 debug_printf("failed to translate tgsi opcode %d (%s) to PPC (%s)\n",
1324 tgsi_get_opcode_name(opcode
),
1325 parse
.FullHeader
.Processor
.Processor
== TGSI_PROCESSOR_VERTEX
?
1326 "vertex shader" : "fragment shader");
1330 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1331 /* splat each immediate component into a float[4] vector for SoA */
1333 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
1336 assert(num_immediates
< TGSI_EXEC_NUM_IMMEDIATES
);
1337 for (i
= 0; i
< size
; i
++) {
1338 immediates
[num_immediates
][i
] =
1339 parse
.FullToken
.FullImmediate
.u
[i
].Float
;
1345 case TGSI_TOKEN_TYPE_PROPERTY
:
1354 emit_epilogue(func
);
1356 tgsi_parse_free( &parse
);
1358 if (ppc_num_instructions(func
) == 0) {
1359 /* ran out of memory for instructions */
1364 debug_printf("TGSI->PPC translation failed\n");
1369 #endif /* PIPE_ARCH_PPC */