1 /**************************************************************************
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 * TGSI to PowerPC code generation.
32 #include "pipe/p_config.h"
34 #if defined(PIPE_ARCH_PPC)
36 #include "pipe/p_debug.h"
37 #include "pipe/p_shader_tokens.h"
38 #include "util/u_math.h"
39 #include "util/u_memory.h"
40 #include "util/u_sse.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_util.h"
43 #include "tgsi_dump.h"
44 #include "tgsi_exec.h"
46 #include "rtasm/rtasm_ppc.h"
50 * Since it's pretty much impossible to form PPC vector immediates, load
51 * them from memory here:
53 const float ppc_builtin_constants
[] ALIGN16_ATTRIB
= {
54 1.0f
, -128.0f
, 128.0, 0.0
/** Channel indices for the X/Y/Z/W components of a TGSI register. */
#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3

/** Iterate CHAN over all NUM_CHANNELS (4) channels. */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/** Non-zero if the dst[0] writemask enables channel CHAN. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/** Iterate CHAN over only the channels enabled in dst[0]'s writemask. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
   IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/**
 * How many TGSI temps should be implemented with real PPC vector registers
 * rather than memory.
 */
#define MAX_PPC_TEMPS 4
86 struct tgsi_full_src_register src
;
93 * Context/state used during code gen.
97 struct ppc_function
*f
;
98 int inputs_reg
; /**< GP register pointing to input params */
99 int outputs_reg
; /**< GP register pointing to output params */
100 int temps_reg
; /**< GP register pointing to temporary "registers" */
101 int immed_reg
; /**< GP register pointing to immediates buffer */
102 int const_reg
; /**< GP register pointing to constants buffer */
103 int builtins_reg
; /**< GP register pointint to built-in constants */
105 int offset_reg
; /**< used to reduce redundant li instructions */
108 int one_vec
; /**< vector register with {1.0, 1.0, 1.0, 1.0} */
109 int bit31_vec
; /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */
112 * Map TGSI temps to PPC vector temps.
113 * We have 32 PPC vector regs. Use 16 of them for storing 4 TGSI temps.
114 * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
116 int temps_map
[MAX_PPC_TEMPS
][4];
119 * Cache of src registers.
120 * This is used to avoid redundant load instructions.
123 struct tgsi_full_src_register src
;
126 } regs
[12]; /* 3 src regs, 4 channels */
132 * Initialize code generation context.
135 init_gen_context(struct gen_context
*gen
, struct ppc_function
*func
)
139 memset(gen
, 0, sizeof(*gen
));
141 gen
->inputs_reg
= ppc_reserve_register(func
, 3); /* first function param */
142 gen
->outputs_reg
= ppc_reserve_register(func
, 4); /* second function param */
143 gen
->temps_reg
= ppc_reserve_register(func
, 5); /* ... */
144 gen
->immed_reg
= ppc_reserve_register(func
, 6);
145 gen
->const_reg
= ppc_reserve_register(func
, 7);
146 gen
->builtins_reg
= ppc_reserve_register(func
, 8);
149 gen
->offset_reg
= -1;
150 gen
->offset_value
= -9999999;
151 for (i
= 0; i
< MAX_PPC_TEMPS
; i
++) {
152 gen
->temps_map
[i
][0] = ppc_allocate_vec_register(gen
->f
);
153 gen
->temps_map
[i
][1] = ppc_allocate_vec_register(gen
->f
);
154 gen
->temps_map
[i
][2] = ppc_allocate_vec_register(gen
->f
);
155 gen
->temps_map
[i
][3] = ppc_allocate_vec_register(gen
->f
);
161 * All PPC vector load/store instructions form an effective address
162 * by adding the contents of two registers. For example:
163 * lvx v2,r8,r9 # v2 = memory[r8 + r9]
164 * stvx v2,r8,r9 # memory[r8 + r9] = v2;
165 * So our lvx/stvx instructions are typically preceded by an 'li' instruction
166 * to load r9 (above) with an immediate (an offset).
167 * This code emits that 'li' instruction, but only if the offset value is
168 * different than the previous 'li'.
169 * This optimization seems to save about 10% in the instruction count.
170 * Note that we need to unconditionally emit an 'li' inside basic blocks
171 * (such as inside loops).
174 emit_li_offset(struct gen_context
*gen
, int offset
)
176 if (gen
->offset_reg
<= 0) {
177 /* allocate a GP register for storing load/store offset */
178 gen
->offset_reg
= ppc_allocate_register(gen
->f
);
181 /* emit new 'li' if offset is changing */
182 if (gen
->offset_value
< 0 || gen
->offset_value
!= offset
) {
183 gen
->offset_value
= offset
;
184 ppc_li(gen
->f
, gen
->offset_reg
, offset
);
187 return gen
->offset_reg
;
192 * Forces subsequent emit_li_offset() calls to emit an 'li'.
193 * To be called at the top of basic blocks.
196 reset_li_offset(struct gen_context
*gen
)
198 gen
->offset_value
= -9999999;
204 * Load the given vector register with {value, value, value, value}.
205 * The value must be in the ppu_builtin_constants[] array.
206 * We wouldn't need this if there was a simple way to load PPC vector
207 * registers with immediate values!
210 load_constant_vec(struct gen_context
*gen
, int dst_vec
, float value
)
213 for (pos
= 0; pos
< Elements(ppc_builtin_constants
); pos
++) {
214 if (ppc_builtin_constants
[pos
] == value
) {
215 int offset
= pos
* 4;
216 int offset_reg
= emit_li_offset(gen
, offset
);
218 /* Load 4-byte word into vector register.
219 * The vector slot depends on the effective address we load from.
220 * We know that our builtins start at a 16-byte boundary so we
221 * know that 'swizzle' tells us which vector slot will have the
222 * loaded word. The other vector slots will be undefined.
224 ppc_lvewx(gen
->f
, dst_vec
, gen
->builtins_reg
, offset_reg
);
225 /* splat word[pos % 4] across the vector reg */
226 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, pos
% 4);
230 assert(0 && "Need to add new constant to ppc_builtin_constants array");
235 * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
238 gen_one_vec(struct gen_context
*gen
)
240 if (gen
->one_vec
< 0) {
241 gen
->one_vec
= ppc_allocate_vec_register(gen
->f
);
242 load_constant_vec(gen
, gen
->one_vec
, 1.0f
);
248 * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}.
251 gen_get_bit31_vec(struct gen_context
*gen
)
253 if (gen
->bit31_vec
< 0) {
254 gen
->bit31_vec
= ppc_allocate_vec_register(gen
->f
);
255 ppc_vspltisw(gen
->f
, gen
->bit31_vec
, -1);
256 ppc_vslw(gen
->f
, gen
->bit31_vec
, gen
->bit31_vec
, gen
->bit31_vec
);
258 return gen
->bit31_vec
;
263 * Register fetch. Return PPC vector register with result.
266 emit_fetch(struct gen_context
*gen
,
267 const struct tgsi_full_src_register
*reg
,
268 const unsigned chan_index
)
270 uint swizzle
= tgsi_util_get_full_src_register_extswizzle(reg
, chan_index
);
274 case TGSI_EXTSWIZZLE_X
:
275 case TGSI_EXTSWIZZLE_Y
:
276 case TGSI_EXTSWIZZLE_Z
:
277 case TGSI_EXTSWIZZLE_W
:
278 switch (reg
->SrcRegister
.File
) {
279 case TGSI_FILE_INPUT
:
281 int offset
= (reg
->SrcRegister
.Index
* 4 + swizzle
) * 16;
282 int offset_reg
= emit_li_offset(gen
, offset
);
283 dst_vec
= ppc_allocate_vec_register(gen
->f
);
284 ppc_lvx(gen
->f
, dst_vec
, gen
->inputs_reg
, offset_reg
);
287 case TGSI_FILE_TEMPORARY
:
288 if (reg
->SrcRegister
.Index
< MAX_PPC_TEMPS
) {
289 /* use PPC vec register */
290 dst_vec
= gen
->temps_map
[reg
->SrcRegister
.Index
][swizzle
];
293 /* use memory-based temp register "file" */
294 int offset
= (reg
->SrcRegister
.Index
* 4 + swizzle
) * 16;
295 int offset_reg
= emit_li_offset(gen
, offset
);
296 dst_vec
= ppc_allocate_vec_register(gen
->f
);
297 ppc_lvx(gen
->f
, dst_vec
, gen
->temps_reg
, offset_reg
);
300 case TGSI_FILE_IMMEDIATE
:
302 int offset
= (reg
->SrcRegister
.Index
* 4 + swizzle
) * 4;
303 int offset_reg
= emit_li_offset(gen
, offset
);
304 dst_vec
= ppc_allocate_vec_register(gen
->f
);
305 /* Load 4-byte word into vector register.
306 * The vector slot depends on the effective address we load from.
307 * We know that our immediates start at a 16-byte boundary so we
308 * know that 'swizzle' tells us which vector slot will have the
309 * loaded word. The other vector slots will be undefined.
311 ppc_lvewx(gen
->f
, dst_vec
, gen
->immed_reg
, offset_reg
);
312 /* splat word[swizzle] across the vector reg */
313 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, swizzle
);
316 case TGSI_FILE_CONSTANT
:
318 int offset
= (reg
->SrcRegister
.Index
* 4 + swizzle
) * 4;
319 int offset_reg
= emit_li_offset(gen
, offset
);
320 dst_vec
= ppc_allocate_vec_register(gen
->f
);
321 /* Load 4-byte word into vector register.
322 * The vector slot depends on the effective address we load from.
323 * We know that our constants start at a 16-byte boundary so we
324 * know that 'swizzle' tells us which vector slot will have the
325 * loaded word. The other vector slots will be undefined.
327 ppc_lvewx(gen
->f
, dst_vec
, gen
->const_reg
, offset_reg
);
328 /* splat word[swizzle] across the vector reg */
329 ppc_vspltw(gen
->f
, dst_vec
, dst_vec
, swizzle
);
336 case TGSI_EXTSWIZZLE_ZERO
:
337 ppc_vzero(gen
->f
, dst_vec
);
339 case TGSI_EXTSWIZZLE_ONE
:
341 int one_vec
= gen_one_vec(gen
);
342 dst_vec
= ppc_allocate_vec_register(gen
->f
);
343 ppc_vmove(gen
->f
, dst_vec
, one_vec
);
350 assert(dst_vec
>= 0);
353 uint sign_op
= tgsi_util_get_full_src_register_sign_mode(reg
, chan_index
);
354 if (sign_op
!= TGSI_UTIL_SIGN_KEEP
) {
355 int bit31_vec
= gen_get_bit31_vec(gen
);
358 case TGSI_UTIL_SIGN_CLEAR
:
359 /* vec = vec & ~bit31 */
360 ppc_vandc(gen
->f
, dst_vec
, dst_vec
, bit31_vec
);
362 case TGSI_UTIL_SIGN_SET
:
363 /* vec = vec | bit31 */
364 ppc_vor(gen
->f
, dst_vec
, dst_vec
, bit31_vec
);
366 case TGSI_UTIL_SIGN_TOGGLE
:
367 /* vec = vec ^ bit31 */
368 ppc_vxor(gen
->f
, dst_vec
, dst_vec
, bit31_vec
);
382 * Test if two TGSI src registers refer to the same memory location.
383 * We use this to avoid redundant register loads.
386 equal_src_locs(const struct tgsi_full_src_register
*a
, uint chan_a
,
387 const struct tgsi_full_src_register
*b
, uint chan_b
)
391 if (a
->SrcRegister
.File
!= b
->SrcRegister
.File
)
393 if (a
->SrcRegister
.Index
!= b
->SrcRegister
.Index
)
395 swz_a
= tgsi_util_get_full_src_register_extswizzle(a
, chan_a
);
396 swz_b
= tgsi_util_get_full_src_register_extswizzle(b
, chan_b
);
399 sign_a
= tgsi_util_get_full_src_register_sign_mode(a
, chan_a
);
400 sign_b
= tgsi_util_get_full_src_register_sign_mode(b
, chan_b
);
401 if (sign_a
!= sign_b
)
408 * Given a TGSI src register and channel index, return the PPC vector
409 * register containing the value. We use a cache to prevent re-loading
410 * the same register multiple times.
411 * \return index of PPC vector register with the desired src operand
414 get_src_vec(struct gen_context
*gen
,
415 struct tgsi_full_instruction
*inst
, int src_reg
, uint chan
)
417 const const struct tgsi_full_src_register
*src
=
418 &inst
->FullSrcRegisters
[src_reg
];
422 /* check the cache */
423 for (i
= 0; i
< gen
->num_regs
; i
++) {
424 if (equal_src_locs(&gen
->regs
[i
].src
, gen
->regs
[i
].chan
, src
, chan
)) {
426 assert(gen
->regs
[i
].vec
>= 0);
427 return gen
->regs
[i
].vec
;
431 /* cache miss: allocate new vec reg and emit fetch/load code */
432 vec
= emit_fetch(gen
, src
, chan
);
433 gen
->regs
[gen
->num_regs
].src
= *src
;
434 gen
->regs
[gen
->num_regs
].chan
= chan
;
435 gen
->regs
[gen
->num_regs
].vec
= vec
;
438 assert(gen
->num_regs
<= Elements(gen
->regs
));
447 * Clear the src operand cache. To be called at the end of each emit function.
450 release_src_vecs(struct gen_context
*gen
)
453 for (i
= 0; i
< gen
->num_regs
; i
++) {
454 const const struct tgsi_full_src_register src
= gen
->regs
[i
].src
;
455 if (!(src
.SrcRegister
.File
== TGSI_FILE_TEMPORARY
&&
456 src
.SrcRegister
.Index
< MAX_PPC_TEMPS
)) {
457 ppc_release_vec_register(gen
->f
, gen
->regs
[i
].vec
);
466 get_dst_vec(struct gen_context
*gen
,
467 const struct tgsi_full_instruction
*inst
,
470 const struct tgsi_full_dst_register
*reg
= &inst
->FullDstRegisters
[0];
472 if (reg
->DstRegister
.File
== TGSI_FILE_TEMPORARY
&&
473 reg
->DstRegister
.Index
< MAX_PPC_TEMPS
) {
474 int vec
= gen
->temps_map
[reg
->DstRegister
.Index
][chan_index
];
478 return ppc_allocate_vec_register(gen
->f
);
484 * Register store. Store 'src_vec' at location indicated by 'reg'.
485 * \param free_vec Should the src_vec be released when done?
488 emit_store(struct gen_context
*gen
,
490 const struct tgsi_full_instruction
*inst
,
494 const struct tgsi_full_dst_register
*reg
= &inst
->FullDstRegisters
[0];
496 switch (reg
->DstRegister
.File
) {
497 case TGSI_FILE_OUTPUT
:
499 int offset
= (reg
->DstRegister
.Index
* 4 + chan_index
) * 16;
500 int offset_reg
= emit_li_offset(gen
, offset
);
501 ppc_stvx(gen
->f
, src_vec
, gen
->outputs_reg
, offset_reg
);
504 case TGSI_FILE_TEMPORARY
:
505 if (reg
->DstRegister
.Index
< MAX_PPC_TEMPS
) {
507 int dst_vec
= gen
->temps_map
[reg
->DstRegister
.Index
][chan_index
];
508 if (dst_vec
!= src_vec
)
509 ppc_vmove(gen
->f
, dst_vec
, src_vec
);
514 int offset
= (reg
->DstRegister
.Index
* 4 + chan_index
) * 16;
515 int offset_reg
= emit_li_offset(gen
, offset
);
516 ppc_stvx(gen
->f
, src_vec
, gen
->temps_reg
, offset_reg
);
520 case TGSI_FILE_ADDRESS
:
524 reg
->DstRegister
.Index
,
533 switch( inst
->Instruction
.Saturate
) {
537 case TGSI_SAT_ZERO_ONE
:
541 case TGSI_SAT_MINUS_PLUS_ONE
:
548 ppc_release_vec_register(gen
->f
, src_vec
);
553 emit_scalar_unaryop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
558 v0
= get_src_vec(gen
, inst
, 0, CHAN_X
);
559 v1
= ppc_allocate_vec_register(gen
->f
);
561 switch (inst
->Instruction
.Opcode
) {
562 case TGSI_OPCODE_RSQ
:
563 /* v1 = 1.0 / sqrt(v0) */
564 ppc_vrsqrtefp(gen
->f
, v1
, v0
);
566 case TGSI_OPCODE_RCP
:
568 ppc_vrefp(gen
->f
, v1
, v0
);
574 FOR_EACH_DST0_ENABLED_CHANNEL( *inst
, chan_index
) {
575 emit_store(gen
, v1
, inst
, chan_index
, FALSE
);
578 release_src_vecs(gen
);
579 ppc_release_vec_register(gen
->f
, v1
);
584 emit_unaryop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
587 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan_index
) {
588 int v0
= get_src_vec(gen
, inst
, 0, chan_index
); /* v0 = srcreg[0] */
589 int v1
= get_dst_vec(gen
, inst
, chan_index
);
590 switch (inst
->Instruction
.Opcode
) {
591 case TGSI_OPCODE_ABS
:
592 /* turn off the most significant bit of each vector float word */
594 int bit31_vec
= gen_get_bit31_vec(gen
);
595 ppc_vandc(gen
->f
, v1
, v0
, bit31_vec
); /* v1 = v0 & ~bit31 */
598 case TGSI_OPCODE_FLOOR
:
599 ppc_vrfim(gen
->f
, v1
, v0
); /* v1 = floor(v0) */
601 case TGSI_OPCODE_FRAC
:
602 ppc_vrfim(gen
->f
, v1
, v0
); /* tmp = floor(v0) */
603 ppc_vsubfp(gen
->f
, v1
, v0
, v1
); /* v1 = v0 - v1 */
605 case TGSI_OPCODE_EXPBASE2
:
606 ppc_vexptefp(gen
->f
, v1
, v0
); /* v1 = 2^v0 */
608 case TGSI_OPCODE_LOGBASE2
:
609 /* XXX this may be broken! */
610 ppc_vlogefp(gen
->f
, v1
, v0
); /* v1 = log2(v0) */
612 case TGSI_OPCODE_MOV
:
614 ppc_vmove(gen
->f
, v1
, v0
);
619 emit_store(gen
, v1
, inst
, chan_index
, TRUE
); /* store v0 */
622 release_src_vecs(gen
);
627 emit_binop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
632 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_MUL
) {
633 zero_vec
= ppc_allocate_vec_register(gen
->f
);
634 ppc_vzero(gen
->f
, zero_vec
);
637 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
638 /* fetch src operands */
639 int v0
= get_src_vec(gen
, inst
, 0, chan
);
640 int v1
= get_src_vec(gen
, inst
, 1, chan
);
641 int v2
= get_dst_vec(gen
, inst
, chan
);
644 switch (inst
->Instruction
.Opcode
) {
645 case TGSI_OPCODE_ADD
:
646 ppc_vaddfp(gen
->f
, v2
, v0
, v1
);
648 case TGSI_OPCODE_SUB
:
649 ppc_vsubfp(gen
->f
, v2
, v0
, v1
);
651 case TGSI_OPCODE_MUL
:
652 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, zero_vec
);
654 case TGSI_OPCODE_MIN
:
655 ppc_vminfp(gen
->f
, v2
, v0
, v1
);
657 case TGSI_OPCODE_MAX
:
658 ppc_vmaxfp(gen
->f
, v2
, v0
, v1
);
665 emit_store(gen
, v2
, inst
, chan
, TRUE
);
668 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_MUL
)
669 ppc_release_vec_register(gen
->f
, zero_vec
);
671 release_src_vecs(gen
);
676 emit_triop(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
680 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
681 /* fetch src operands */
682 int v0
= get_src_vec(gen
, inst
, 0, chan
);
683 int v1
= get_src_vec(gen
, inst
, 1, chan
);
684 int v2
= get_src_vec(gen
, inst
, 2, chan
);
685 int v3
= get_dst_vec(gen
, inst
, chan
);
688 switch (inst
->Instruction
.Opcode
) {
689 case TGSI_OPCODE_MAD
:
690 ppc_vmaddfp(gen
->f
, v3
, v0
, v1
, v2
); /* v3 = v0 * v1 + v2 */
692 case TGSI_OPCODE_LRP
:
693 ppc_vsubfp(gen
->f
, v3
, v1
, v2
); /* v3 = v1 - v2 */
694 ppc_vmaddfp(gen
->f
, v3
, v0
, v3
, v2
); /* v3 = v0 * v3 + v2 */
701 emit_store(gen
, v3
, inst
, chan
, TRUE
);
704 release_src_vecs(gen
);
709 * Vector comparisons, resulting in 1.0 or 0.0 values.
712 emit_inequality(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
715 int one_vec
= gen_one_vec(gen
);
717 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan
) {
718 /* fetch src operands */
719 int v0
= get_src_vec(gen
, inst
, 0, chan
);
720 int v1
= get_src_vec(gen
, inst
, 1, chan
);
721 int v2
= get_dst_vec(gen
, inst
, chan
);
722 boolean complement
= FALSE
;
724 switch (inst
->Instruction
.Opcode
) {
725 case TGSI_OPCODE_SNE
:
728 case TGSI_OPCODE_SEQ
:
729 ppc_vcmpeqfpx(gen
->f
, v2
, v0
, v1
); /* v2 = v0 == v1 ? ~0 : 0 */
732 case TGSI_OPCODE_SGE
:
735 case TGSI_OPCODE_SLT
:
736 ppc_vcmpgtfpx(gen
->f
, v2
, v1
, v0
); /* v2 = v1 > v0 ? ~0 : 0 */
739 case TGSI_OPCODE_SLE
:
742 case TGSI_OPCODE_SGT
:
743 ppc_vcmpgtfpx(gen
->f
, v2
, v0
, v1
); /* v2 = v0 > v1 ? ~0 : 0 */
749 /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
752 ppc_vandc(gen
->f
, v2
, one_vec
, v2
); /* v2 = one_vec & ~v2 */
754 ppc_vand(gen
->f
, v2
, one_vec
, v2
); /* v2 = one_vec & v2 */
757 emit_store(gen
, v2
, inst
, chan
, TRUE
);
760 release_src_vecs(gen
);
765 emit_dotprod(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
770 v2
= ppc_allocate_vec_register(gen
->f
);
772 ppc_vxor(gen
->f
, v2
, v2
, v2
); /* v2 = {0, 0, 0, 0} */
774 v0
= get_src_vec(gen
, inst
, 0, CHAN_X
); /* v0 = src0.XXXX */
775 v1
= get_src_vec(gen
, inst
, 1, CHAN_X
); /* v1 = src1.XXXX */
776 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
778 v0
= get_src_vec(gen
, inst
, 0, CHAN_Y
); /* v0 = src0.YYYY */
779 v1
= get_src_vec(gen
, inst
, 1, CHAN_Y
); /* v1 = src1.YYYY */
780 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
782 v0
= get_src_vec(gen
, inst
, 0, CHAN_Z
); /* v0 = src0.ZZZZ */
783 v1
= get_src_vec(gen
, inst
, 1, CHAN_Z
); /* v1 = src1.ZZZZ */
784 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
786 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_DP4
) {
787 v0
= get_src_vec(gen
, inst
, 0, CHAN_W
); /* v0 = src0.WWWW */
788 v1
= get_src_vec(gen
, inst
, 1, CHAN_W
); /* v1 = src1.WWWW */
789 ppc_vmaddfp(gen
->f
, v2
, v0
, v1
, v2
); /* v2 = v0 * v1 + v2 */
791 else if (inst
->Instruction
.Opcode
== TGSI_OPCODE_DPH
) {
792 v1
= get_src_vec(gen
, inst
, 1, CHAN_W
); /* v1 = src1.WWWW */
793 ppc_vaddfp(gen
->f
, v2
, v2
, v1
); /* v2 = v2 + v1 */
796 FOR_EACH_DST0_ENABLED_CHANNEL(*inst
, chan_index
) {
797 emit_store(gen
, v2
, inst
, chan_index
, FALSE
); /* store v2, free v2 later */
800 release_src_vecs(gen
);
802 ppc_release_vec_register(gen
->f
, v2
);
/**
 * Approximation for vr = pow(va, vb).
 * Uses the AltiVec log/exp estimate instructions, so precision is limited.
 */
static void
ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
{
   /* pow(a,b) ~= exp2(log2(a) * b) */
   int t_vec = ppc_allocate_vec_register(f);
   int zero_vec = ppc_allocate_vec_register(f);

   ppc_vzero(f, zero_vec);

   ppc_vlogefp(f, t_vec, va);                   /* t = log2(va) */
   ppc_vmaddfp(f, t_vec, t_vec, vb, zero_vec);  /* t = t * vb + 0 */
   ppc_vexptefp(f, vr, t_vec);                  /* vr = 2^t */

   ppc_release_vec_register(f, t_vec);
   ppc_release_vec_register(f, zero_vec);
}
826 emit_lit(struct gen_context
*gen
, struct tgsi_full_instruction
*inst
)
828 int one_vec
= gen_one_vec(gen
);
831 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_X
)) {
832 emit_store(gen
, one_vec
, inst
, CHAN_X
, FALSE
);
836 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
) ||
837 IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
839 int zero_vec
= ppc_allocate_vec_register(gen
->f
);
841 x_vec
= get_src_vec(gen
, inst
, 0, CHAN_X
); /* x_vec = src[0].x */
843 ppc_vzero(gen
->f
, zero_vec
); /* zero = {0,0,0,0} */
844 ppc_vmaxfp(gen
->f
, x_vec
, x_vec
, zero_vec
); /* x_vec = max(x_vec, 0) */
846 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Y
)) {
847 emit_store(gen
, x_vec
, inst
, CHAN_Y
, FALSE
);
850 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_Z
)) {
852 int z_vec
= ppc_allocate_vec_register(gen
->f
);
853 int pow_vec
= ppc_allocate_vec_register(gen
->f
);
854 int pos_vec
= ppc_allocate_vec_register(gen
->f
);
855 int p128_vec
= ppc_allocate_vec_register(gen
->f
);
856 int n128_vec
= ppc_allocate_vec_register(gen
->f
);
858 y_vec
= get_src_vec(gen
, inst
, 0, CHAN_Y
); /* y_vec = src[0].y */
859 ppc_vmaxfp(gen
->f
, y_vec
, y_vec
, zero_vec
); /* y_vec = max(y_vec, 0) */
861 w_vec
= get_src_vec(gen
, inst
, 0, CHAN_W
); /* w_vec = src[0].w */
863 /* clamp W to [-128, 128] */
864 load_constant_vec(gen
, p128_vec
, 128.0f
);
865 load_constant_vec(gen
, n128_vec
, -128.0f
);
866 ppc_vmaxfp(gen
->f
, w_vec
, w_vec
, n128_vec
); /* w = max(w, -128) */
867 ppc_vminfp(gen
->f
, w_vec
, w_vec
, p128_vec
); /* w = min(w, 128) */
870 * z = pow(tmp.y, tmp.w)
874 ppc_vec_pow(gen
->f
, pow_vec
, y_vec
, w_vec
); /* pow = pow(y, w) */
875 ppc_vcmpgtfpx(gen
->f
, pos_vec
, x_vec
, zero_vec
); /* pos = x > 0 */
876 ppc_vand(gen
->f
, z_vec
, pow_vec
, pos_vec
); /* z = pow & pos */
878 emit_store(gen
, z_vec
, inst
, CHAN_Z
, FALSE
);
880 ppc_release_vec_register(gen
->f
, z_vec
);
881 ppc_release_vec_register(gen
->f
, pow_vec
);
882 ppc_release_vec_register(gen
->f
, pos_vec
);
883 ppc_release_vec_register(gen
->f
, p128_vec
);
884 ppc_release_vec_register(gen
->f
, n128_vec
);
887 ppc_release_vec_register(gen
->f
, zero_vec
);
891 if (IS_DST0_CHANNEL_ENABLED(*inst
, CHAN_W
)) {
892 emit_store(gen
, one_vec
, inst
, CHAN_W
, FALSE
);
895 release_src_vecs(gen
);
900 emit_instruction(struct gen_context
*gen
,
901 struct tgsi_full_instruction
*inst
)
903 switch (inst
->Instruction
.Opcode
) {
904 case TGSI_OPCODE_MOV
:
905 case TGSI_OPCODE_ABS
:
906 case TGSI_OPCODE_FLOOR
:
907 case TGSI_OPCODE_FRAC
:
908 case TGSI_OPCODE_EXPBASE2
:
909 case TGSI_OPCODE_LOGBASE2
:
910 emit_unaryop(gen
, inst
);
912 case TGSI_OPCODE_RSQ
:
913 case TGSI_OPCODE_RCP
:
914 emit_scalar_unaryop(gen
, inst
);
916 case TGSI_OPCODE_ADD
:
917 case TGSI_OPCODE_SUB
:
918 case TGSI_OPCODE_MUL
:
919 case TGSI_OPCODE_MIN
:
920 case TGSI_OPCODE_MAX
:
921 emit_binop(gen
, inst
);
923 case TGSI_OPCODE_SEQ
:
924 case TGSI_OPCODE_SNE
:
925 case TGSI_OPCODE_SLT
:
926 case TGSI_OPCODE_SGT
:
927 case TGSI_OPCODE_SLE
:
928 case TGSI_OPCODE_SGE
:
929 emit_inequality(gen
, inst
);
931 case TGSI_OPCODE_MAD
:
932 case TGSI_OPCODE_LRP
:
933 emit_triop(gen
, inst
);
935 case TGSI_OPCODE_DP3
:
936 case TGSI_OPCODE_DP4
:
937 case TGSI_OPCODE_DPH
:
938 emit_dotprod(gen
, inst
);
940 case TGSI_OPCODE_LIT
:
943 case TGSI_OPCODE_END
:
955 struct ppc_function
*func
,
956 struct tgsi_full_declaration
*decl
)
958 if( decl
->Declaration
.File
== TGSI_FILE_INPUT
) {
960 unsigned first
, last
, mask
;
963 first
= decl
->DeclarationRange
.First
;
964 last
= decl
->DeclarationRange
.Last
;
965 mask
= decl
->Declaration
.UsageMask
;
967 for( i
= first
; i
<= last
; i
++ ) {
968 for( j
= 0; j
< NUM_CHANNELS
; j
++ ) {
969 if( mask
& (1 << j
) ) {
970 switch( decl
->Declaration
.Interpolate
) {
971 case TGSI_INTERPOLATE_CONSTANT
:
972 emit_coef_a0( func
, 0, i
, j
);
973 emit_inputs( func
, 0, i
, j
);
976 case TGSI_INTERPOLATE_LINEAR
:
977 emit_tempf( func
, 0, 0, TGSI_SWIZZLE_X
);
978 emit_coef_dadx( func
, 1, i
, j
);
979 emit_tempf( func
, 2, 0, TGSI_SWIZZLE_Y
);
980 emit_coef_dady( func
, 3, i
, j
);
981 emit_mul( func
, 0, 1 ); /* x * dadx */
982 emit_coef_a0( func
, 4, i
, j
);
983 emit_mul( func
, 2, 3 ); /* y * dady */
984 emit_add( func
, 0, 4 ); /* x * dadx + a0 */
985 emit_add( func
, 0, 2 ); /* x * dadx + y * dady + a0 */
986 emit_inputs( func
, 0, i
, j
);
989 case TGSI_INTERPOLATE_PERSPECTIVE
:
990 emit_tempf( func
, 0, 0, TGSI_SWIZZLE_X
);
991 emit_coef_dadx( func
, 1, i
, j
);
992 emit_tempf( func
, 2, 0, TGSI_SWIZZLE_Y
);
993 emit_coef_dady( func
, 3, i
, j
);
994 emit_mul( func
, 0, 1 ); /* x * dadx */
995 emit_tempf( func
, 4, 0, TGSI_SWIZZLE_W
);
996 emit_coef_a0( func
, 5, i
, j
);
997 emit_rcp( func
, 4, 4 ); /* 1.0 / w */
998 emit_mul( func
, 2, 3 ); /* y * dady */
999 emit_add( func
, 0, 5 ); /* x * dadx + a0 */
1000 emit_add( func
, 0, 2 ); /* x * dadx + y * dady + a0 */
1001 emit_mul( func
, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
1002 emit_inputs( func
, 0, i
, j
);
/**
 * Emit function prologue (currently a no-op).
 */
static void
emit_prologue(struct ppc_function *func)
{
   /* XXX set up stack frame */
}
1026 emit_epilogue(struct ppc_function
*func
)
1029 /* XXX restore prev stack frame */
1030 debug_printf("PPC: Emitted %u instructions\n", func
->num_inst
);
1036 * Translate a TGSI vertex/fragment shader to PPC code.
1038 * \param tokens the TGSI input shader
1039 * \param func the output PPC code/function
1040 * \param immediates buffer to place immediates, later passed to PPC func
1041 * \return TRUE for success, FALSE if translation failed
1044 tgsi_emit_ppc(const struct tgsi_token
*tokens
,
1045 struct ppc_function
*func
,
1046 float (*immediates
)[4],
1047 boolean do_swizzles
)
1049 static int use_ppc_asm
= -1;
1050 struct tgsi_parse_context parse
;
1051 /*boolean instruction_phase = FALSE;*/
1053 uint num_immediates
= 0;
1054 struct gen_context gen
;
1056 if (use_ppc_asm
< 0) {
1057 /* If GALLIUM_NOPPC is set, don't use PPC codegen */
1058 use_ppc_asm
= !debug_get_bool_option("GALLIUM_NOPPC", FALSE
);
1064 debug_printf("\n********* TGSI->PPC ********\n");
1065 tgsi_dump(tokens
, 0);
1070 init_gen_context(&gen
, func
);
1072 emit_prologue(func
);
1074 tgsi_parse_init( &parse
, tokens
);
1076 while (!tgsi_parse_end_of_tokens(&parse
) && ok
) {
1077 tgsi_parse_token(&parse
);
1079 switch (parse
.FullToken
.Token
.Type
) {
1080 case TGSI_TOKEN_TYPE_DECLARATION
:
1081 if (parse
.FullHeader
.Processor
.Processor
== TGSI_PROCESSOR_FRAGMENT
) {
1082 emit_declaration(func
, &parse
.FullToken
.FullDeclaration
);
1086 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1087 ok
= emit_instruction(&gen
, &parse
.FullToken
.FullInstruction
);
1090 debug_printf("failed to translate tgsi opcode %d to PPC (%s)\n",
1091 parse
.FullToken
.FullInstruction
.Instruction
.Opcode
,
1092 parse
.FullHeader
.Processor
.Processor
== TGSI_PROCESSOR_VERTEX
?
1093 "vertex shader" : "fragment shader");
1097 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1098 /* splat each immediate component into a float[4] vector for SoA */
1100 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.Size
- 1;
1101 float *imm
= (float *) immediates
;
1104 assert(num_immediates
< TGSI_EXEC_NUM_IMMEDIATES
);
1105 for (i
= 0; i
< size
; i
++) {
1106 immediates
[num_immediates
][i
] =
1107 parse
.FullToken
.FullImmediate
.u
.ImmediateFloat32
[i
].Float
;
1119 emit_epilogue(func
);
1121 tgsi_parse_free( &parse
);
1126 #endif /* PIPE_ARCH_PPC */