/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * TGSI to LLVM IR translation -- SoA.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 *
 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
 * Brian Paul, and others.
 */
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_exec.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_logic.h"
53 #include "lp_bld_swizzle.h"
54 #include "lp_bld_flow.h"
55 #include "lp_bld_tgsi.h"
56 #include "lp_bld_debug.h"
/* Upper bounds on the register files we pre-allocate per shader. */
#define LP_MAX_TEMPS 256
#define LP_MAX_IMMEDIATES 256


/** Iterate CHAN over all four register channels (x, y, z, w). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/** Test whether CHAN is enabled in the write mask of dst register 0. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/** Iterate CHAN over only the channels enabled in dst register 0. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* Positions of the four pixels within a 2x2 quad (SoA layout). */
#define QUAD_TOP_LEFT     0
#define QUAD_TOP_RIGHT    1
#define QUAD_BOTTOM_LEFT  2
#define QUAD_BOTTOM_RIGHT 3

/** Maximum nesting depth of conditionals/loops we track masks for. */
#define LP_TGSI_MAX_NESTING 16
89 struct lp_build_context
*bld
;
93 LLVMTypeRef int_vec_type
;
95 LLVMValueRef cond_stack
[LP_TGSI_MAX_NESTING
];
97 LLVMValueRef cond_mask
;
99 LLVMValueRef break_stack
[LP_TGSI_MAX_NESTING
];
100 int break_stack_size
;
101 LLVMValueRef break_mask
;
103 LLVMValueRef cont_stack
[LP_TGSI_MAX_NESTING
];
105 LLVMValueRef cont_mask
;
107 LLVMBasicBlockRef loop_stack
[LP_TGSI_MAX_NESTING
];
109 LLVMBasicBlockRef loop_block
;
112 LLVMValueRef exec_mask
;
115 struct lp_build_tgsi_soa_context
117 struct lp_build_context base
;
119 LLVMValueRef consts_ptr
;
120 const LLVMValueRef
*pos
;
121 const LLVMValueRef (*inputs
)[NUM_CHANNELS
];
122 LLVMValueRef (*outputs
)[NUM_CHANNELS
];
124 struct lp_build_sampler_soa
*sampler
;
126 LLVMValueRef immediates
[LP_MAX_IMMEDIATES
][NUM_CHANNELS
];
127 LLVMValueRef temps
[LP_MAX_TEMPS
][NUM_CHANNELS
];
129 struct lp_build_mask_context
*mask
;
130 struct lp_exec_mask exec_mask
;
133 static const unsigned char
135 QUAD_TOP_LEFT
, QUAD_TOP_LEFT
,
136 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_LEFT
139 static const unsigned char
141 QUAD_TOP_RIGHT
, QUAD_TOP_RIGHT
,
142 QUAD_BOTTOM_RIGHT
, QUAD_BOTTOM_RIGHT
145 static const unsigned char
147 QUAD_TOP_LEFT
, QUAD_TOP_RIGHT
,
148 QUAD_TOP_LEFT
, QUAD_TOP_RIGHT
151 static const unsigned char
152 swizzle_bottom
[4] = {
153 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_RIGHT
,
154 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_RIGHT
157 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
160 mask
->has_mask
= FALSE
;
161 mask
->cond_stack_size
= 0;
162 mask
->loop_stack_size
= 0;
163 mask
->break_stack_size
= 0;
164 mask
->cont_stack_size
= 0;
166 mask
->int_vec_type
= lp_build_int_vec_type(mask
->bld
->type
);
169 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
171 if (mask
->loop_stack_size
) {
172 /*for loops we need to update the entire mask at runtime */
174 assert(mask
->break_mask
);
175 tmp
= LLVMBuildAnd(mask
->bld
->builder
,
179 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
184 mask
->exec_mask
= mask
->cond_mask
;
187 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
188 mask
->loop_stack_size
> 0);
191 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
194 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
195 mask
->cond_mask
= LLVMBuildBitCast(mask
->bld
->builder
, val
,
196 mask
->int_vec_type
, "");
198 lp_exec_mask_update(mask
);
201 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
203 LLVMValueRef prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
204 LLVMValueRef inv_mask
= LLVMBuildNot(mask
->bld
->builder
,
205 mask
->cond_mask
, "");
207 /* means that we didn't have any mask before and that
208 * we were fully enabled */
209 if (mask
->cond_stack_size
<= 1) {
210 prev_mask
= LLVMConstAllOnes(mask
->int_vec_type
);
213 mask
->cond_mask
= LLVMBuildAnd(mask
->bld
->builder
,
216 lp_exec_mask_update(mask
);
219 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
221 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
222 lp_exec_mask_update(mask
);
225 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
228 if (mask
->cont_stack_size
== 0)
229 mask
->cont_mask
= LLVMConstAllOnes(mask
->int_vec_type
);
230 if (mask
->break_stack_size
== 0)
231 mask
->break_mask
= LLVMConstAllOnes(mask
->int_vec_type
);
232 if (mask
->cond_stack_size
== 0)
233 mask
->cond_mask
= LLVMConstAllOnes(mask
->int_vec_type
);
235 mask
->break_stack
[mask
->break_stack_size
++] = mask
->break_mask
;
236 mask
->cont_stack
[mask
->cont_stack_size
++] = mask
->cont_mask
;
237 mask
->loop_stack
[mask
->loop_stack_size
++] = mask
->loop_block
;
238 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->builder
, "bgnloop");
239 LLVMBuildBr(mask
->bld
->builder
, mask
->loop_block
);
240 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, mask
->loop_block
);
242 lp_exec_mask_update(mask
);
245 static void lp_exec_break(struct lp_exec_mask
*mask
)
247 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
251 /* mask->break_stack_size > 1 implies that we encountered a break
252 * statemant already and if that's the case we want to make sure
253 * our mask is a combination of the previous break and the current
255 if (mask
->break_stack_size
> 1) {
256 mask
->break_mask
= LLVMBuildAnd(mask
->bld
->builder
,
258 exec_mask
, "break_full");
260 mask
->break_mask
= exec_mask
;
262 lp_exec_mask_update(mask
);
265 static void lp_exec_continue(struct lp_exec_mask
*mask
)
267 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
271 if (mask
->cont_stack_size
> 1) {
272 mask
->cont_mask
= LLVMBuildAnd(mask
->bld
->builder
,
276 mask
->cont_mask
= exec_mask
;
278 lp_exec_mask_update(mask
);
282 static void lp_exec_endloop(struct lp_exec_mask
*mask
)
284 LLVMBasicBlockRef endloop
;
285 LLVMTypeRef reg_type
= LLVMIntType(mask
->bld
->type
.width
*
286 mask
->bld
->type
.length
);
289 assert(mask
->break_mask
);
291 /* i1cond = (mask == 0) */
292 i1cond
= LLVMBuildICmp(
295 LLVMBuildBitCast(mask
->bld
->builder
, mask
->break_mask
, reg_type
, ""),
296 LLVMConstNull(reg_type
), "");
298 endloop
= lp_build_insert_new_block(mask
->bld
->builder
, "endloop");
300 LLVMBuildCondBr(mask
->bld
->builder
,
301 i1cond
, mask
->loop_block
, endloop
);
303 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, endloop
);
305 mask
->loop_block
= mask
->loop_stack
[--mask
->loop_stack_size
];
306 /* pop the cont mask */
307 if (mask
->cont_stack_size
) {
308 mask
->cont_mask
= mask
->cont_stack
[--mask
->cont_stack_size
];
310 /* pop the break mask */
311 if (mask
->break_stack_size
) {
312 mask
->break_mask
= mask
->break_stack
[--mask
->break_stack_size
];
315 lp_exec_mask_update(mask
);
318 /* stores val into an address pointed to by dst.
319 * mask->exec_mask is used to figure out which bits of val
320 * should be stored into the address
321 * (0 means don't store this bit, 1 means do store).
323 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
327 if (mask
->has_mask
) {
328 LLVMValueRef real_val
, dst_val
;
330 dst_val
= LLVMBuildLoad(mask
->bld
->builder
, dst
, "");
331 real_val
= lp_build_select(mask
->bld
,
335 LLVMBuildStore(mask
->bld
->builder
, real_val
, dst
);
337 LLVMBuildStore(mask
->bld
->builder
, val
, dst
);
342 emit_ddx(struct lp_build_tgsi_soa_context
*bld
,
345 LLVMValueRef src_left
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_left
);
346 LLVMValueRef src_right
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_right
);
347 return lp_build_sub(&bld
->base
, src_right
, src_left
);
352 emit_ddy(struct lp_build_tgsi_soa_context
*bld
,
355 LLVMValueRef src_top
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_top
);
356 LLVMValueRef src_bottom
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_bottom
);
357 return lp_build_sub(&bld
->base
, src_top
, src_bottom
);
366 struct lp_build_tgsi_soa_context
*bld
,
367 const struct tgsi_full_instruction
*inst
,
369 const unsigned chan_index
)
371 const struct tgsi_full_src_register
*reg
= &inst
->Src
[index
];
372 unsigned swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
381 switch (reg
->Register
.File
) {
382 case TGSI_FILE_CONSTANT
: {
383 LLVMValueRef index
= LLVMConstInt(LLVMInt32Type(), reg
->Register
.Index
*4 + swizzle
, 0);
384 LLVMValueRef scalar_ptr
= LLVMBuildGEP(bld
->base
.builder
, bld
->consts_ptr
, &index
, 1, "");
385 LLVMValueRef scalar
= LLVMBuildLoad(bld
->base
.builder
, scalar_ptr
, "");
386 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
390 case TGSI_FILE_IMMEDIATE
:
391 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
395 case TGSI_FILE_INPUT
:
396 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
400 case TGSI_FILE_TEMPORARY
:
401 res
= LLVMBuildLoad(bld
->base
.builder
, bld
->temps
[reg
->Register
.Index
][swizzle
], "");
403 return bld
->base
.undef
;
408 return bld
->base
.undef
;
414 return bld
->base
.undef
;
417 switch( tgsi_util_get_full_src_register_sign_mode( reg
, chan_index
) ) {
418 case TGSI_UTIL_SIGN_CLEAR
:
419 res
= lp_build_abs( &bld
->base
, res
);
422 case TGSI_UTIL_SIGN_SET
:
423 /* TODO: Use bitwese OR for floating point */
424 res
= lp_build_abs( &bld
->base
, res
);
425 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
428 case TGSI_UTIL_SIGN_TOGGLE
:
429 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
432 case TGSI_UTIL_SIGN_KEEP
:
441 * Register fetch with derivatives.
445 struct lp_build_tgsi_soa_context
*bld
,
446 const struct tgsi_full_instruction
*inst
,
448 const unsigned chan_index
,
455 src
= emit_fetch(bld
, inst
, index
, chan_index
);
460 /* TODO: use interpolation coeffs for inputs */
463 *ddx
= emit_ddx(bld
, src
);
466 *ddy
= emit_ddy(bld
, src
);
475 struct lp_build_tgsi_soa_context
*bld
,
476 const struct tgsi_full_instruction
*inst
,
481 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
483 switch( inst
->Instruction
.Saturate
) {
487 case TGSI_SAT_ZERO_ONE
:
488 value
= lp_build_max(&bld
->base
, value
, bld
->base
.zero
);
489 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
492 case TGSI_SAT_MINUS_PLUS_ONE
:
493 value
= lp_build_max(&bld
->base
, value
, lp_build_const_vec(bld
->base
.type
, -1.0));
494 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
501 switch( reg
->Register
.File
) {
502 case TGSI_FILE_OUTPUT
:
503 lp_exec_mask_store(&bld
->exec_mask
, value
,
504 bld
->outputs
[reg
->Register
.Index
][chan_index
]);
507 case TGSI_FILE_TEMPORARY
:
508 lp_exec_mask_store(&bld
->exec_mask
, value
,
509 bld
->temps
[reg
->Register
.Index
][chan_index
]);
512 case TGSI_FILE_ADDRESS
:
517 case TGSI_FILE_PREDICATE
:
529 * High-level instruction translators.
534 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
535 const struct tgsi_full_instruction
*inst
,
536 boolean apply_lodbias
,
540 const uint unit
= inst
->Src
[1].Register
.Index
;
541 LLVMValueRef lodbias
;
542 LLVMValueRef oow
= NULL
;
543 LLVMValueRef coords
[3];
547 switch (inst
->Texture
.Texture
) {
548 case TGSI_TEXTURE_1D
:
551 case TGSI_TEXTURE_2D
:
552 case TGSI_TEXTURE_RECT
:
555 case TGSI_TEXTURE_SHADOW1D
:
556 case TGSI_TEXTURE_SHADOW2D
:
557 case TGSI_TEXTURE_SHADOWRECT
:
558 case TGSI_TEXTURE_3D
:
559 case TGSI_TEXTURE_CUBE
:
568 lodbias
= emit_fetch( bld
, inst
, 0, 3 );
570 lodbias
= bld
->base
.zero
;
573 oow
= emit_fetch( bld
, inst
, 0, 3 );
574 oow
= lp_build_rcp(&bld
->base
, oow
);
577 for (i
= 0; i
< num_coords
; i
++) {
578 coords
[i
] = emit_fetch( bld
, inst
, 0, i
);
580 coords
[i
] = lp_build_mul(&bld
->base
, coords
[i
], oow
);
582 for (i
= num_coords
; i
< 3; i
++) {
583 coords
[i
] = bld
->base
.undef
;
586 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
589 unit
, num_coords
, coords
, lodbias
,
595 * Kill fragment if any of the src register values are negative.
599 struct lp_build_tgsi_soa_context
*bld
,
600 const struct tgsi_full_instruction
*inst
)
602 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
603 LLVMValueRef terms
[NUM_CHANNELS
];
607 memset(&terms
, 0, sizeof terms
);
609 FOR_EACH_CHANNEL( chan_index
) {
612 /* Unswizzle channel */
613 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
615 /* Check if the component has not been already tested. */
616 assert(swizzle
< NUM_CHANNELS
);
617 if( !terms
[swizzle
] )
618 /* TODO: change the comparison operator instead of setting the sign */
619 terms
[swizzle
] = emit_fetch(bld
, inst
, 0, chan_index
);
623 FOR_EACH_CHANNEL( chan_index
) {
624 if(terms
[chan_index
]) {
625 LLVMValueRef chan_mask
;
628 * If term < 0 then mask = 0 else mask = ~0.
630 chan_mask
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->base
.zero
);
633 mask
= LLVMBuildAnd(bld
->base
.builder
, mask
, chan_mask
, "");
640 lp_build_mask_update(bld
->mask
, mask
);
645 * Predicated fragment kill.
646 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
647 * The only predication is the execution mask which will apply if
648 * we're inside a loop or conditional.
651 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
652 const struct tgsi_full_instruction
*inst
)
656 /* For those channels which are "alive", disable fragment shader
659 if (bld
->exec_mask
.has_mask
) {
660 mask
= LLVMBuildNot(bld
->base
.builder
, bld
->exec_mask
.exec_mask
, "kilp");
663 mask
= bld
->base
.zero
;
666 lp_build_mask_update(bld
->mask
, mask
);
671 * Check if inst src/dest regs use indirect addressing into temporary
675 indirect_temp_reference(const struct tgsi_full_instruction
*inst
)
678 for (i
= 0; i
< inst
->Instruction
.NumSrcRegs
; i
++) {
679 const struct tgsi_full_src_register
*reg
= &inst
->Src
[i
];
680 if (reg
->Register
.File
== TGSI_FILE_TEMPORARY
&&
681 reg
->Register
.Indirect
)
684 for (i
= 0; i
< inst
->Instruction
.NumDstRegs
; i
++) {
685 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[i
];
686 if (reg
->Register
.File
== TGSI_FILE_TEMPORARY
&&
687 reg
->Register
.Indirect
)
695 struct lp_build_tgsi_soa_context
*bld
,
696 const struct tgsi_full_declaration
*decl
)
698 unsigned first
= decl
->Range
.First
;
699 unsigned last
= decl
->Range
.Last
;
701 LLVMBasicBlockRef current_block
=
702 LLVMGetInsertBlock(bld
->base
.builder
);
703 LLVMBasicBlockRef first_block
=
704 LLVMGetEntryBasicBlock(
705 LLVMGetBasicBlockParent(current_block
));
706 LLVMValueRef first_inst
=
707 LLVMGetFirstInstruction(first_block
);
709 /* we want alloca's to be the first instruction
710 * in the function so we need to rewind the builder
711 * to the very beginning */
712 LLVMPositionBuilderBefore(bld
->base
.builder
,
715 for (idx
= first
; idx
<= last
; ++idx
) {
716 switch (decl
->Declaration
.File
) {
717 case TGSI_FILE_TEMPORARY
:
718 for (i
= 0; i
< NUM_CHANNELS
; i
++)
719 bld
->temps
[idx
][i
] = lp_build_alloca(&bld
->base
);
722 case TGSI_FILE_OUTPUT
:
723 for (i
= 0; i
< NUM_CHANNELS
; i
++)
724 bld
->outputs
[idx
][i
] = lp_build_alloca(&bld
->base
);
728 /* don't need to declare other vars */
733 LLVMPositionBuilderAtEnd(bld
->base
.builder
,
740 * Emit LLVM for one TGSI instruction.
741 * \param return TRUE for success, FALSE otherwise
745 struct lp_build_tgsi_soa_context
*bld
,
746 const struct tgsi_full_instruction
*inst
,
747 const struct tgsi_opcode_info
*info
)
750 LLVMValueRef src0
, src1
, src2
;
751 LLVMValueRef tmp0
, tmp1
, tmp2
;
752 LLVMValueRef tmp3
= NULL
;
753 LLVMValueRef tmp4
= NULL
;
754 LLVMValueRef tmp5
= NULL
;
755 LLVMValueRef tmp6
= NULL
;
756 LLVMValueRef tmp7
= NULL
;
758 LLVMValueRef dst0
[NUM_CHANNELS
];
760 /* we can't handle indirect addressing into temp register file yet */
761 if (indirect_temp_reference(inst
))
765 * Stores and write masks are handled in a general fashion after the long
766 * instruction opcode switch statement.
768 * Although not stricitly necessary, we avoid generating instructions for
769 * channels which won't be stored, in cases where's that easy. For some
770 * complex instructions, like texture sampling, it is more convenient to
771 * assume a full writemask and then let LLVM optimization passes eliminate
775 assert(info
->num_dst
<= 1);
777 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
778 dst0
[chan_index
] = bld
->base
.undef
;
782 switch (inst
->Instruction
.Opcode
) {
784 case TGSI_OPCODE_ARL
:
786 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
787 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
790 dst0
[chan_index
] = tmp0
;
795 case TGSI_OPCODE_MOV
:
796 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
797 dst0
[chan_index
] = emit_fetch( bld
, inst
, 0, chan_index
);
801 case TGSI_OPCODE_LIT
:
802 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ) {
803 dst0
[CHAN_X
] = bld
->base
.one
;
805 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
806 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
807 dst0
[CHAN_Y
] = lp_build_max( &bld
->base
, src0
, bld
->base
.zero
);
809 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
810 /* XMM[1] = SrcReg[0].yyyy */
811 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
812 /* XMM[1] = max(XMM[1], 0) */
813 tmp1
= lp_build_max( &bld
->base
, tmp1
, bld
->base
.zero
);
814 /* XMM[2] = SrcReg[0].wwww */
815 tmp2
= emit_fetch( bld
, inst
, 0, CHAN_W
);
816 tmp1
= lp_build_pow( &bld
->base
, tmp1
, tmp2
);
817 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
818 tmp2
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GREATER
, tmp0
, bld
->base
.zero
);
819 dst0
[CHAN_Z
] = lp_build_select(&bld
->base
, tmp2
, tmp1
, bld
->base
.zero
);
821 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) ) {
822 dst0
[CHAN_W
] = bld
->base
.one
;
826 case TGSI_OPCODE_RCP
:
827 /* TGSI_OPCODE_RECIP */
828 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
829 res
= lp_build_rcp(&bld
->base
, src0
);
830 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
831 dst0
[chan_index
] = res
;
835 case TGSI_OPCODE_RSQ
:
836 /* TGSI_OPCODE_RECIPSQRT */
837 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
838 src0
= lp_build_abs(&bld
->base
, src0
);
839 res
= lp_build_rsqrt(&bld
->base
, src0
);
840 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
841 dst0
[chan_index
] = res
;
845 case TGSI_OPCODE_EXP
:
846 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
847 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
848 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
849 LLVMValueRef
*p_exp2_int_part
= NULL
;
850 LLVMValueRef
*p_frac_part
= NULL
;
851 LLVMValueRef
*p_exp2
= NULL
;
853 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
855 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
856 p_exp2_int_part
= &tmp0
;
857 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
859 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
862 lp_build_exp2_approx(&bld
->base
, src0
, p_exp2_int_part
, p_frac_part
, p_exp2
);
864 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
866 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
868 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
872 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
873 dst0
[CHAN_W
] = bld
->base
.one
;
877 case TGSI_OPCODE_LOG
:
878 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
879 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
880 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
881 LLVMValueRef
*p_floor_log2
= NULL
;
882 LLVMValueRef
*p_exp
= NULL
;
883 LLVMValueRef
*p_log2
= NULL
;
885 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
886 src0
= lp_build_abs( &bld
->base
, src0
);
888 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
889 p_floor_log2
= &tmp0
;
890 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
892 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
895 lp_build_log2_approx(&bld
->base
, src0
, p_exp
, p_floor_log2
, p_log2
);
897 /* dst.x = floor(lg2(abs(src.x))) */
898 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
900 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
901 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
)) {
902 dst0
[CHAN_Y
] = lp_build_div( &bld
->base
, src0
, tmp1
);
904 /* dst.z = lg2(abs(src.x)) */
905 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
909 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
910 dst0
[CHAN_W
] = bld
->base
.one
;
914 case TGSI_OPCODE_MUL
:
915 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
916 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
917 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
918 dst0
[chan_index
] = lp_build_mul(&bld
->base
, src0
, src1
);
922 case TGSI_OPCODE_ADD
:
923 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
924 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
925 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
926 dst0
[chan_index
] = lp_build_add(&bld
->base
, src0
, src1
);
930 case TGSI_OPCODE_DP3
:
931 /* TGSI_OPCODE_DOT3 */
932 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
933 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
934 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
935 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
936 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
937 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
938 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
939 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
940 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
941 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
942 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
943 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
944 dst0
[chan_index
] = tmp0
;
948 case TGSI_OPCODE_DP4
:
949 /* TGSI_OPCODE_DOT4 */
950 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
951 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
952 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
953 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
954 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
955 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
956 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
957 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
958 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
959 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
960 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
961 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_W
);
962 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_W
);
963 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
964 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
965 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
966 dst0
[chan_index
] = tmp0
;
970 case TGSI_OPCODE_DST
:
971 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
972 dst0
[CHAN_X
] = bld
->base
.one
;
974 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
975 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
976 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
977 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp0
, tmp1
);
979 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
980 dst0
[CHAN_Z
] = emit_fetch( bld
, inst
, 0, CHAN_Z
);
982 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
983 dst0
[CHAN_W
] = emit_fetch( bld
, inst
, 1, CHAN_W
);
987 case TGSI_OPCODE_MIN
:
988 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
989 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
990 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
991 dst0
[chan_index
] = lp_build_min( &bld
->base
, src0
, src1
);
995 case TGSI_OPCODE_MAX
:
996 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
997 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
998 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
999 dst0
[chan_index
] = lp_build_max( &bld
->base
, src0
, src1
);
1003 case TGSI_OPCODE_SLT
:
1004 /* TGSI_OPCODE_SETLT */
1005 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1006 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1007 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1008 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, src1
);
1009 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1013 case TGSI_OPCODE_SGE
:
1014 /* TGSI_OPCODE_SETGE */
1015 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1016 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1017 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1018 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GEQUAL
, src0
, src1
);
1019 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1023 case TGSI_OPCODE_MAD
:
1024 /* TGSI_OPCODE_MADD */
1025 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1026 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1027 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1028 tmp2
= emit_fetch( bld
, inst
, 2, chan_index
);
1029 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1030 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp2
);
1031 dst0
[chan_index
] = tmp0
;
1035 case TGSI_OPCODE_SUB
:
1036 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1037 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1038 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1039 dst0
[chan_index
] = lp_build_sub( &bld
->base
, tmp0
, tmp1
);
1043 case TGSI_OPCODE_LRP
:
1044 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1045 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1046 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1047 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1048 tmp0
= lp_build_sub( &bld
->base
, src1
, src2
);
1049 tmp0
= lp_build_mul( &bld
->base
, src0
, tmp0
);
1050 dst0
[chan_index
] = lp_build_add( &bld
->base
, tmp0
, src2
);
1054 case TGSI_OPCODE_CND
:
1055 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1056 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1057 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1058 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1059 tmp1
= lp_build_const_vec(bld
->base
.type
, 0.5);
1060 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src2
, tmp1
);
1061 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src0
, src1
);
1065 case TGSI_OPCODE_DP2A
:
1066 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1067 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1068 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1069 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1070 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1071 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1072 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1073 tmp1
= emit_fetch( bld
, inst
, 2, CHAN_X
); /* xmm1 = src[2].x */
1074 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1075 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1076 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1080 case TGSI_OPCODE_FRC
:
1081 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1082 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1083 tmp0
= lp_build_floor(&bld
->base
, src0
);
1084 tmp0
= lp_build_sub(&bld
->base
, src0
, tmp0
);
1085 dst0
[chan_index
] = tmp0
;
1089 case TGSI_OPCODE_CLAMP
:
1090 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1091 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1092 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1093 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1094 tmp0
= lp_build_max(&bld
->base
, tmp0
, src1
);
1095 tmp0
= lp_build_min(&bld
->base
, tmp0
, src2
);
1096 dst0
[chan_index
] = tmp0
;
1100 case TGSI_OPCODE_FLR
:
1101 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1102 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1103 dst0
[chan_index
] = lp_build_floor(&bld
->base
, tmp0
);
1107 case TGSI_OPCODE_ROUND
:
1108 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1109 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1110 dst0
[chan_index
] = lp_build_round(&bld
->base
, tmp0
);
1114 case TGSI_OPCODE_EX2
: {
1115 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1116 tmp0
= lp_build_exp2( &bld
->base
, tmp0
);
1117 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1118 dst0
[chan_index
] = tmp0
;
1123 case TGSI_OPCODE_LG2
:
1124 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1125 tmp0
= lp_build_log2( &bld
->base
, tmp0
);
1126 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1127 dst0
[chan_index
] = tmp0
;
1131 case TGSI_OPCODE_POW
:
1132 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1133 src1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1134 res
= lp_build_pow( &bld
->base
, src0
, src1
);
1135 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1136 dst0
[chan_index
] = res
;
1140 case TGSI_OPCODE_XPD
:
1141 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1142 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1143 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1144 tmp3
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1146 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1147 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1148 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1149 tmp4
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1151 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1153 tmp2
= lp_build_mul( &bld
->base
, tmp2
, tmp1
);
1155 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1156 tmp2
= lp_build_sub( &bld
->base
, tmp2
, tmp5
);
1157 dst0
[CHAN_X
] = tmp2
;
1159 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1160 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1161 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1162 tmp5
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1164 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1165 tmp3
= lp_build_mul( &bld
->base
, tmp3
, tmp2
);
1166 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp5
);
1167 tmp3
= lp_build_sub( &bld
->base
, tmp3
, tmp1
);
1168 dst0
[CHAN_Y
] = tmp3
;
1170 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1171 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1172 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp2
);
1173 tmp5
= lp_build_sub( &bld
->base
, tmp5
, tmp0
);
1174 dst0
[CHAN_Z
] = tmp5
;
1176 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1177 dst0
[CHAN_W
] = bld
->base
.one
;
1181 case TGSI_OPCODE_ABS
:
1182 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1183 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1184 dst0
[chan_index
] = lp_build_abs( &bld
->base
, tmp0
);
1188 case TGSI_OPCODE_RCC
:
1193 case TGSI_OPCODE_DPH
:
1194 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1195 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1196 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1197 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1198 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1199 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1200 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1201 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1202 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1203 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1204 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1205 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1206 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1207 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1208 dst0
[chan_index
] = tmp0
;
1212 case TGSI_OPCODE_COS
:
1213 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1214 tmp0
= lp_build_cos( &bld
->base
, tmp0
);
1215 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1216 dst0
[chan_index
] = tmp0
;
1220 case TGSI_OPCODE_DDX
:
1221 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1222 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, &dst0
[chan_index
], NULL
);
1226 case TGSI_OPCODE_DDY
:
1227 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1228 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, NULL
, &dst0
[chan_index
]);
1232 case TGSI_OPCODE_KILP
:
1233 /* predicated kill */
1234 emit_kilp( bld
, inst
);
1237 case TGSI_OPCODE_KIL
:
1238 /* conditional kill */
1239 emit_kil( bld
, inst
);
1242 case TGSI_OPCODE_PK2H
:
1246 case TGSI_OPCODE_PK2US
:
1250 case TGSI_OPCODE_PK4B
:
1254 case TGSI_OPCODE_PK4UB
:
1258 case TGSI_OPCODE_RFL
:
1262 case TGSI_OPCODE_SEQ
:
1263 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1264 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1265 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1266 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_EQUAL
, src0
, src1
);
1267 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1271 case TGSI_OPCODE_SFL
:
1272 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1273 dst0
[chan_index
] = bld
->base
.zero
;
1277 case TGSI_OPCODE_SGT
:
1278 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1279 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1280 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1281 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src0
, src1
);
1282 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1286 case TGSI_OPCODE_SIN
:
1287 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1288 tmp0
= lp_build_sin( &bld
->base
, tmp0
);
1289 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1290 dst0
[chan_index
] = tmp0
;
1294 case TGSI_OPCODE_SLE
:
1295 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1296 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1297 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1298 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LEQUAL
, src0
, src1
);
1299 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1303 case TGSI_OPCODE_SNE
:
1304 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1305 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1306 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1307 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_NOTEQUAL
, src0
, src1
);
1308 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1312 case TGSI_OPCODE_STR
:
1313 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1314 dst0
[chan_index
] = bld
->base
.one
;
1318 case TGSI_OPCODE_TEX
:
1319 emit_tex( bld
, inst
, FALSE
, FALSE
, dst0
);
1322 case TGSI_OPCODE_TXD
:
1327 case TGSI_OPCODE_UP2H
:
1333 case TGSI_OPCODE_UP2US
:
1339 case TGSI_OPCODE_UP4B
:
1345 case TGSI_OPCODE_UP4UB
:
1351 case TGSI_OPCODE_X2D
:
1357 case TGSI_OPCODE_ARA
:
1364 case TGSI_OPCODE_ARR
:
1366 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1367 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1368 emit_rnd( bld
, 0, 0 );
1369 emit_f2it( bld
, 0 );
1370 dst0
[chan_index
] = tmp0
;
1375 case TGSI_OPCODE_BRA
:
1381 case TGSI_OPCODE_CAL
:
1386 case TGSI_OPCODE_RET
:
1391 case TGSI_OPCODE_END
:
1394 case TGSI_OPCODE_SSG
:
1395 /* TGSI_OPCODE_SGN */
1396 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1397 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1398 dst0
[chan_index
] = lp_build_sgn( &bld
->base
, tmp0
);
1402 case TGSI_OPCODE_CMP
:
1403 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1404 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1405 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1406 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1407 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, bld
->base
.zero
);
1408 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src1
, src2
);
1412 case TGSI_OPCODE_SCS
:
1413 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1414 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1415 dst0
[CHAN_X
] = lp_build_cos( &bld
->base
, tmp0
);
1417 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1418 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1419 dst0
[CHAN_Y
] = lp_build_sin( &bld
->base
, tmp0
);
1421 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1422 dst0
[CHAN_Z
] = bld
->base
.zero
;
1424 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1425 dst0
[CHAN_W
] = bld
->base
.one
;
1429 case TGSI_OPCODE_TXB
:
1430 emit_tex( bld
, inst
, TRUE
, FALSE
, dst0
);
1433 case TGSI_OPCODE_NRM
:
1435 case TGSI_OPCODE_NRM4
:
1436 /* 3 or 4-component normalization */
1438 uint dims
= (inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
1440 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) ||
1441 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
) ||
1442 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
) ||
1443 (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 4)) {
1445 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1448 /* xmm0 = src.x * src.x */
1449 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1450 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1453 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp0
);
1456 /* xmm0 = xmm0 + src.y * src.y */
1457 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Y
);
1458 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1461 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1462 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1465 /* xmm0 = xmm0 + src.z * src.z */
1466 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Z
);
1467 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1470 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1471 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1475 /* xmm0 = xmm0 + src.w * src.w */
1476 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_W
);
1477 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
)) {
1480 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1481 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1484 /* xmm1 = 1 / sqrt(xmm0) */
1485 tmp1
= lp_build_rsqrt( &bld
->base
, tmp0
);
1487 /* dst.x = xmm1 * src.x */
1488 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1489 dst0
[CHAN_X
] = lp_build_mul( &bld
->base
, tmp4
, tmp1
);
1492 /* dst.y = xmm1 * src.y */
1493 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1494 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp5
, tmp1
);
1497 /* dst.z = xmm1 * src.z */
1498 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1499 dst0
[CHAN_Z
] = lp_build_mul( &bld
->base
, tmp6
, tmp1
);
1502 /* dst.w = xmm1 * src.w */
1503 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) && dims
== 4) {
1504 dst0
[CHAN_W
] = lp_build_mul( &bld
->base
, tmp7
, tmp1
);
1509 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 3) {
1510 dst0
[CHAN_W
] = bld
->base
.one
;
1515 case TGSI_OPCODE_DIV
:
1521 case TGSI_OPCODE_DP2
:
1522 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1523 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1524 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1525 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1526 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1527 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1528 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1529 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1530 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1534 case TGSI_OPCODE_TXL
:
1535 emit_tex( bld
, inst
, TRUE
, FALSE
, dst0
);
1538 case TGSI_OPCODE_TXP
:
1539 emit_tex( bld
, inst
, FALSE
, TRUE
, dst0
);
1542 case TGSI_OPCODE_BRK
:
1543 lp_exec_break(&bld
->exec_mask
);
1546 case TGSI_OPCODE_IF
:
1547 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1548 tmp0
= lp_build_cmp(&bld
->base
, PIPE_FUNC_NOTEQUAL
,
1549 tmp0
, bld
->base
.zero
);
1550 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp0
);
1553 case TGSI_OPCODE_BGNFOR
:
1559 case TGSI_OPCODE_BGNLOOP
:
1560 lp_exec_bgnloop(&bld
->exec_mask
);
1563 case TGSI_OPCODE_REP
:
1569 case TGSI_OPCODE_ELSE
:
1570 lp_exec_mask_cond_invert(&bld
->exec_mask
);
1573 case TGSI_OPCODE_ENDIF
:
1574 lp_exec_mask_cond_pop(&bld
->exec_mask
);
1577 case TGSI_OPCODE_ENDFOR
:
1583 case TGSI_OPCODE_ENDLOOP
:
1584 lp_exec_endloop(&bld
->exec_mask
);
1587 case TGSI_OPCODE_ENDREP
:
1593 case TGSI_OPCODE_PUSHA
:
1599 case TGSI_OPCODE_POPA
:
1605 case TGSI_OPCODE_CEIL
:
1606 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1607 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1608 dst0
[chan_index
] = lp_build_ceil(&bld
->base
, tmp0
);
1612 case TGSI_OPCODE_I2F
:
1618 case TGSI_OPCODE_NOT
:
1624 case TGSI_OPCODE_TRUNC
:
1625 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1626 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1627 dst0
[chan_index
] = lp_build_trunc(&bld
->base
, tmp0
);
1631 case TGSI_OPCODE_SHL
:
1637 case TGSI_OPCODE_ISHR
:
1643 case TGSI_OPCODE_AND
:
1649 case TGSI_OPCODE_OR
:
1655 case TGSI_OPCODE_MOD
:
1661 case TGSI_OPCODE_XOR
:
1667 case TGSI_OPCODE_SAD
:
1673 case TGSI_OPCODE_TXF
:
1679 case TGSI_OPCODE_TXQ
:
1685 case TGSI_OPCODE_CONT
:
1686 lp_exec_continue(&bld
->exec_mask
);
1689 case TGSI_OPCODE_EMIT
:
1693 case TGSI_OPCODE_ENDPRIM
:
1697 case TGSI_OPCODE_NOP
:
1705 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1706 emit_store( bld
, inst
, 0, chan_index
, dst0
[chan_index
]);
1715 lp_build_tgsi_soa(LLVMBuilderRef builder
,
1716 const struct tgsi_token
*tokens
,
1717 struct lp_type type
,
1718 struct lp_build_mask_context
*mask
,
1719 LLVMValueRef consts_ptr
,
1720 const LLVMValueRef
*pos
,
1721 const LLVMValueRef (*inputs
)[NUM_CHANNELS
],
1722 LLVMValueRef (*outputs
)[NUM_CHANNELS
],
1723 struct lp_build_sampler_soa
*sampler
)
1725 struct lp_build_tgsi_soa_context bld
;
1726 struct tgsi_parse_context parse
;
1727 uint num_immediates
= 0;
1730 /* Setup build context */
1731 memset(&bld
, 0, sizeof bld
);
1732 lp_build_context_init(&bld
.base
, builder
, type
);
1735 bld
.inputs
= inputs
;
1736 bld
.outputs
= outputs
;
1737 bld
.consts_ptr
= consts_ptr
;
1738 bld
.sampler
= sampler
;
1740 lp_exec_mask_init(&bld
.exec_mask
, &bld
.base
);
1742 tgsi_parse_init( &parse
, tokens
);
1744 while( !tgsi_parse_end_of_tokens( &parse
) ) {
1745 tgsi_parse_token( &parse
);
1747 switch( parse
.FullToken
.Token
.Type
) {
1748 case TGSI_TOKEN_TYPE_DECLARATION
:
1749 /* Inputs already interpolated */
1751 if (!emit_declaration( &bld
, &parse
.FullToken
.FullDeclaration
))
1752 _debug_printf("warning: failed to define LLVM variable\n");
1756 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1758 unsigned opcode
= parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
1759 const struct tgsi_opcode_info
*info
= tgsi_get_opcode_info(opcode
);
1760 if (!emit_instruction( &bld
, &parse
.FullToken
.FullInstruction
, info
))
1761 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
1762 info
? info
->mnemonic
: "<invalid>");
1767 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1768 /* simply copy the immediate values into the next immediates[] slot */
1770 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
1772 assert(num_immediates
< LP_MAX_IMMEDIATES
);
1773 for( i
= 0; i
< size
; ++i
)
1774 bld
.immediates
[num_immediates
][i
] =
1775 lp_build_const_vec(type
, parse
.FullToken
.FullImmediate
.u
[i
].Float
);
1776 for( i
= size
; i
< 4; ++i
)
1777 bld
.immediates
[num_immediates
][i
] = bld
.base
.undef
;
1782 case TGSI_TOKEN_TYPE_PROPERTY
:
1790 LLVMBasicBlockRef block
= LLVMGetInsertBlock(builder
);
1791 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
1792 debug_printf("11111111111111111111111111111 \n");
1793 tgsi_dump(tokens
, 0);
1794 LLVMDumpValue(function
);
1795 debug_printf("2222222222222222222222222222 \n");
1797 tgsi_parse_free( &parse
);