1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_exec.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_type.h"
51 #include "lp_bld_const.h"
52 #include "lp_bld_arit.h"
53 #include "lp_bld_logic.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_quad.h"
57 #include "lp_bld_tgsi.h"
58 #include "lp_bld_limits.h"
59 #include "lp_bld_debug.h"
/*
 * Per-channel iteration helpers over the 4 TGSI channels (x,y,z,w).
 * NUM_CHANNELS is project-defined (4) — see tgsi headers.
 */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* True when channel CHAN of the first destination register is written. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate only over the dst0 channels enabled by the write mask. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* Initial capacity of the instruction buffer; grown on demand. */
#define LP_MAX_INSTRUCTIONS 256
84 struct lp_build_context
*bld
;
88 LLVMTypeRef int_vec_type
;
90 LLVMValueRef cond_stack
[LP_MAX_TGSI_NESTING
];
92 LLVMValueRef cond_mask
;
94 LLVMBasicBlockRef loop_block
;
95 LLVMValueRef cont_mask
;
96 LLVMValueRef break_mask
;
97 LLVMValueRef break_var
;
99 LLVMBasicBlockRef loop_block
;
100 LLVMValueRef cont_mask
;
101 LLVMValueRef break_mask
;
102 LLVMValueRef break_var
;
103 } loop_stack
[LP_MAX_TGSI_NESTING
];
106 LLVMValueRef ret_mask
;
109 LLVMValueRef ret_mask
;
110 } call_stack
[LP_MAX_TGSI_NESTING
];
113 LLVMValueRef exec_mask
;
116 struct lp_build_tgsi_soa_context
118 struct lp_build_context base
;
120 /* Builder for integer masks and indices */
121 struct lp_build_context int_bld
;
123 LLVMValueRef consts_ptr
;
124 const LLVMValueRef
*pos
;
125 const LLVMValueRef (*inputs
)[NUM_CHANNELS
];
126 LLVMValueRef (*outputs
)[NUM_CHANNELS
];
128 const struct lp_build_sampler_soa
*sampler
;
130 LLVMValueRef immediates
[LP_MAX_TGSI_IMMEDIATES
][NUM_CHANNELS
];
131 LLVMValueRef temps
[LP_MAX_TGSI_TEMPS
][NUM_CHANNELS
];
132 LLVMValueRef addr
[LP_MAX_TGSI_ADDRS
][NUM_CHANNELS
];
133 LLVMValueRef preds
[LP_MAX_TGSI_PREDS
][NUM_CHANNELS
];
135 /* we allocate an array of temps if we have indirect
136 * addressing and then the temps above is unused */
137 LLVMValueRef temps_array
;
138 boolean has_indirect_addressing
;
140 struct lp_build_mask_context
*mask
;
141 struct lp_exec_mask exec_mask
;
143 struct tgsi_full_instruction
*instructions
;
144 uint max_instructions
;
147 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
150 mask
->has_mask
= FALSE
;
151 mask
->cond_stack_size
= 0;
152 mask
->loop_stack_size
= 0;
153 mask
->call_stack_size
= 0;
155 mask
->int_vec_type
= lp_build_int_vec_type(mask
->bld
->type
);
156 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
= mask
->cond_mask
=
157 LLVMConstAllOnes(mask
->int_vec_type
);
160 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
162 if (mask
->loop_stack_size
) {
163 /*for loops we need to update the entire mask at runtime */
165 assert(mask
->break_mask
);
166 tmp
= LLVMBuildAnd(mask
->bld
->builder
,
170 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
175 mask
->exec_mask
= mask
->cond_mask
;
177 if (mask
->call_stack_size
) {
178 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
184 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
185 mask
->loop_stack_size
> 0 ||
186 mask
->call_stack_size
> 0);
189 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
192 assert(mask
->cond_stack_size
< LP_MAX_TGSI_NESTING
);
193 if (mask
->cond_stack_size
== 0) {
194 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
196 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
197 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
198 mask
->cond_mask
= val
;
200 lp_exec_mask_update(mask
);
203 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
205 LLVMValueRef prev_mask
;
206 LLVMValueRef inv_mask
;
208 assert(mask
->cond_stack_size
);
209 prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
210 if (mask
->cond_stack_size
== 1) {
211 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
214 inv_mask
= LLVMBuildNot(mask
->bld
->builder
, mask
->cond_mask
, "");
216 mask
->cond_mask
= LLVMBuildAnd(mask
->bld
->builder
,
219 lp_exec_mask_update(mask
);
222 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
224 assert(mask
->cond_stack_size
);
225 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
226 lp_exec_mask_update(mask
);
229 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
231 if (mask
->loop_stack_size
== 0) {
232 assert(mask
->loop_block
== NULL
);
233 assert(mask
->cont_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
234 assert(mask
->break_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
235 assert(mask
->break_var
== NULL
);
238 assert(mask
->loop_stack_size
< LP_MAX_TGSI_NESTING
);
240 mask
->loop_stack
[mask
->loop_stack_size
].loop_block
= mask
->loop_block
;
241 mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
242 mask
->loop_stack
[mask
->loop_stack_size
].break_mask
= mask
->break_mask
;
243 mask
->loop_stack
[mask
->loop_stack_size
].break_var
= mask
->break_var
;
244 ++mask
->loop_stack_size
;
246 mask
->break_var
= lp_build_alloca(mask
->bld
->builder
, mask
->int_vec_type
, "");
247 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
249 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->builder
, "bgnloop");
250 LLVMBuildBr(mask
->bld
->builder
, mask
->loop_block
);
251 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, mask
->loop_block
);
253 mask
->break_mask
= LLVMBuildLoad(mask
->bld
->builder
, mask
->break_var
, "");
255 lp_exec_mask_update(mask
);
258 static void lp_exec_break(struct lp_exec_mask
*mask
)
260 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
264 mask
->break_mask
= LLVMBuildAnd(mask
->bld
->builder
,
266 exec_mask
, "break_full");
268 lp_exec_mask_update(mask
);
271 static void lp_exec_continue(struct lp_exec_mask
*mask
)
273 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
277 mask
->cont_mask
= LLVMBuildAnd(mask
->bld
->builder
,
281 lp_exec_mask_update(mask
);
285 static void lp_exec_endloop(struct lp_exec_mask
*mask
)
287 LLVMBasicBlockRef endloop
;
288 LLVMTypeRef reg_type
= LLVMIntType(mask
->bld
->type
.width
*
289 mask
->bld
->type
.length
);
292 assert(mask
->break_mask
);
295 * Restore the cont_mask, but don't pop
297 assert(mask
->loop_stack_size
);
298 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
- 1].cont_mask
;
299 lp_exec_mask_update(mask
);
302 * Unlike the continue mask, the break_mask must be preserved across loop
305 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
307 /* i1cond = (mask == 0) */
308 i1cond
= LLVMBuildICmp(
311 LLVMBuildBitCast(mask
->bld
->builder
, mask
->exec_mask
, reg_type
, ""),
312 LLVMConstNull(reg_type
), "");
314 endloop
= lp_build_insert_new_block(mask
->bld
->builder
, "endloop");
316 LLVMBuildCondBr(mask
->bld
->builder
,
317 i1cond
, mask
->loop_block
, endloop
);
319 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, endloop
);
321 assert(mask
->loop_stack_size
);
322 --mask
->loop_stack_size
;
323 mask
->loop_block
= mask
->loop_stack
[mask
->loop_stack_size
].loop_block
;
324 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
;
325 mask
->break_mask
= mask
->loop_stack
[mask
->loop_stack_size
].break_mask
;
326 mask
->break_var
= mask
->loop_stack
[mask
->loop_stack_size
].break_var
;
328 lp_exec_mask_update(mask
);
331 /* stores val into an address pointed to by dst.
332 * mask->exec_mask is used to figure out which bits of val
333 * should be stored into the address
334 * (0 means don't store this bit, 1 means do store).
336 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
341 /* Mix the predicate and execution mask */
342 if (mask
->has_mask
) {
344 pred
= LLVMBuildAnd(mask
->bld
->builder
, pred
, mask
->exec_mask
, "");
346 pred
= mask
->exec_mask
;
351 LLVMValueRef real_val
, dst_val
;
353 dst_val
= LLVMBuildLoad(mask
->bld
->builder
, dst
, "");
354 real_val
= lp_build_select(mask
->bld
,
358 LLVMBuildStore(mask
->bld
->builder
, real_val
, dst
);
360 LLVMBuildStore(mask
->bld
->builder
, val
, dst
);
363 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
367 assert(mask
->call_stack_size
< LP_MAX_TGSI_NESTING
);
368 mask
->call_stack
[mask
->call_stack_size
].pc
= *pc
;
369 mask
->call_stack
[mask
->call_stack_size
].ret_mask
= mask
->ret_mask
;
370 mask
->call_stack_size
++;
374 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
376 LLVMValueRef exec_mask
;
378 if (mask
->call_stack_size
== 0) {
379 /* returning from main() */
383 exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
387 mask
->ret_mask
= LLVMBuildAnd(mask
->bld
->builder
,
389 exec_mask
, "ret_full");
391 lp_exec_mask_update(mask
);
/*
 * BGNSUB: nothing to do at a subroutine entry — the call machinery is
 * handled by lp_exec_mask_call/lp_exec_mask_endsub.
 * NOTE(review): body lines were dropped by the extraction; reconstructed
 * as an empty stub — confirm against upstream gallivm.
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
398 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
400 assert(mask
->call_stack_size
);
401 mask
->call_stack_size
--;
402 *pc
= mask
->call_stack
[mask
->call_stack_size
].pc
;
403 mask
->ret_mask
= mask
->call_stack
[mask
->call_stack_size
].ret_mask
;
404 lp_exec_mask_update(mask
);
408 get_temp_ptr(struct lp_build_tgsi_soa_context
*bld
,
415 if (!bld
->has_indirect_addressing
) {
416 return bld
->temps
[index
][chan
];
418 LLVMValueRef lindex
=
419 LLVMConstInt(LLVMInt32Type(), index
* 4 + chan
, 0);
421 lindex
= lp_build_add(&bld
->base
, lindex
, addr
);
422 return LLVMBuildGEP(bld
->base
.builder
, bld
->temps_array
, &lindex
, 1, "");
431 struct lp_build_tgsi_soa_context
*bld
,
432 const struct tgsi_full_instruction
*inst
,
434 const unsigned chan_index
)
436 const struct tgsi_full_src_register
*reg
= &inst
->Src
[index
];
437 const unsigned swizzle
=
438 tgsi_util_get_full_src_register_swizzle(reg
, chan_index
);
440 LLVMValueRef addr
= NULL
;
443 assert(0 && "invalid swizzle in emit_fetch()");
444 return bld
->base
.undef
;
447 if (reg
->Register
.Indirect
) {
448 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->base
.type
);
449 unsigned swizzle
= tgsi_util_get_src_register_swizzle( ®
->Indirect
, chan_index
);
450 addr
= LLVMBuildLoad(bld
->base
.builder
,
451 bld
->addr
[reg
->Indirect
.Index
][swizzle
],
453 /* for indexing we want integers */
454 addr
= LLVMBuildFPToSI(bld
->base
.builder
, addr
,
456 addr
= LLVMBuildExtractElement(bld
->base
.builder
,
457 addr
, LLVMConstInt(LLVMInt32Type(), 0, 0),
459 addr
= lp_build_mul(&bld
->base
, addr
, LLVMConstInt(LLVMInt32Type(), 4, 0));
462 switch (reg
->Register
.File
) {
463 case TGSI_FILE_CONSTANT
:
465 LLVMValueRef index
= LLVMConstInt(LLVMInt32Type(),
466 reg
->Register
.Index
*4 + swizzle
, 0);
467 LLVMValueRef scalar
, scalar_ptr
;
469 if (reg
->Register
.Indirect
) {
470 /*lp_build_printf(bld->base.builder,
471 "\taddr = %d\n", addr);*/
472 index
= lp_build_add(&bld
->base
, index
, addr
);
474 scalar_ptr
= LLVMBuildGEP(bld
->base
.builder
, bld
->consts_ptr
,
476 scalar
= LLVMBuildLoad(bld
->base
.builder
, scalar_ptr
, "");
478 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
482 case TGSI_FILE_IMMEDIATE
:
483 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
487 case TGSI_FILE_INPUT
:
488 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
492 case TGSI_FILE_TEMPORARY
:
494 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
496 reg
->Register
.Indirect
,
498 res
= LLVMBuildLoad(bld
->base
.builder
, temp_ptr
, "");
500 return bld
->base
.undef
;
505 assert(0 && "invalid src register in emit_fetch()");
506 return bld
->base
.undef
;
509 switch( tgsi_util_get_full_src_register_sign_mode( reg
, chan_index
) ) {
510 case TGSI_UTIL_SIGN_CLEAR
:
511 res
= lp_build_abs( &bld
->base
, res
);
514 case TGSI_UTIL_SIGN_SET
:
515 /* TODO: Use bitwese OR for floating point */
516 res
= lp_build_abs( &bld
->base
, res
);
517 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
520 case TGSI_UTIL_SIGN_TOGGLE
:
521 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
524 case TGSI_UTIL_SIGN_KEEP
:
533 * Register fetch with derivatives.
537 struct lp_build_tgsi_soa_context
*bld
,
538 const struct tgsi_full_instruction
*inst
,
540 const unsigned chan_index
,
547 src
= emit_fetch(bld
, inst
, index
, chan_index
);
552 /* TODO: use interpolation coeffs for inputs */
555 *ddx
= lp_build_ddx(&bld
->base
, src
);
558 *ddy
= lp_build_ddy(&bld
->base
, src
);
566 emit_fetch_predicate(
567 struct lp_build_tgsi_soa_context
*bld
,
568 const struct tgsi_full_instruction
*inst
,
572 unsigned char swizzles
[4];
573 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
577 if (!inst
->Instruction
.Predicate
) {
578 FOR_EACH_CHANNEL( chan
) {
584 swizzles
[0] = inst
->Predicate
.SwizzleX
;
585 swizzles
[1] = inst
->Predicate
.SwizzleY
;
586 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
587 swizzles
[3] = inst
->Predicate
.SwizzleW
;
589 index
= inst
->Predicate
.Index
;
590 assert(index
< LP_MAX_TGSI_PREDS
);
592 FOR_EACH_CHANNEL( chan
) {
593 unsigned swizzle
= swizzles
[chan
];
596 * Only fetch the predicate register channels that are actually listed
599 if (!unswizzled
[swizzle
]) {
600 value
= LLVMBuildLoad(bld
->base
.builder
,
601 bld
->preds
[index
][swizzle
], "");
604 * Convert the value to an integer mask.
606 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
607 * is needlessly causing two comparisons due to storing the intermediate
608 * result as float vector instead of an integer mask vector.
610 value
= lp_build_compare(bld
->base
.builder
,
615 if (inst
->Predicate
.Negate
) {
616 value
= LLVMBuildNot(bld
->base
.builder
, value
, "");
619 unswizzled
[swizzle
] = value
;
621 value
= unswizzled
[swizzle
];
634 struct lp_build_tgsi_soa_context
*bld
,
635 const struct tgsi_full_instruction
*inst
,
641 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
642 LLVMValueRef addr
= NULL
;
644 switch( inst
->Instruction
.Saturate
) {
648 case TGSI_SAT_ZERO_ONE
:
649 value
= lp_build_max(&bld
->base
, value
, bld
->base
.zero
);
650 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
653 case TGSI_SAT_MINUS_PLUS_ONE
:
654 value
= lp_build_max(&bld
->base
, value
, lp_build_const_vec(bld
->base
.type
, -1.0));
655 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
662 if (reg
->Register
.Indirect
) {
663 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->base
.type
);
664 unsigned swizzle
= tgsi_util_get_src_register_swizzle( ®
->Indirect
, chan_index
);
665 addr
= LLVMBuildLoad(bld
->base
.builder
,
666 bld
->addr
[reg
->Indirect
.Index
][swizzle
],
668 /* for indexing we want integers */
669 addr
= LLVMBuildFPToSI(bld
->base
.builder
, addr
,
671 addr
= LLVMBuildExtractElement(bld
->base
.builder
,
672 addr
, LLVMConstInt(LLVMInt32Type(), 0, 0),
674 addr
= lp_build_mul(&bld
->base
, addr
, LLVMConstInt(LLVMInt32Type(), 4, 0));
677 switch( reg
->Register
.File
) {
678 case TGSI_FILE_OUTPUT
:
679 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
680 bld
->outputs
[reg
->Register
.Index
][chan_index
]);
683 case TGSI_FILE_TEMPORARY
: {
684 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
686 reg
->Register
.Indirect
,
688 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
, temp_ptr
);
692 case TGSI_FILE_ADDRESS
:
693 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
694 bld
->addr
[reg
->Indirect
.Index
][chan_index
]);
697 case TGSI_FILE_PREDICATE
:
698 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
699 bld
->preds
[index
][chan_index
]);
/**
 * High-level instruction translators.
 */

/* Variants of the TGSI texture sampling opcodes handled by emit_tex(). */
enum tex_modifier {
   TEX_MODIFIER_NONE = 0,
   TEX_MODIFIER_PROJECTED,
   TEX_MODIFIER_LOD_BIAS,
   TEX_MODIFIER_EXPLICIT_LOD,
   TEX_MODIFIER_EXPLICIT_DERIV
};
721 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
722 const struct tgsi_full_instruction
*inst
,
723 enum tex_modifier modifier
,
727 LLVMValueRef lod_bias
, explicit_lod
;
728 LLVMValueRef oow
= NULL
;
729 LLVMValueRef coords
[3];
736 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
737 for (i
= 0; i
< 4; i
++) {
738 texel
[i
] = bld
->base
.undef
;
743 switch (inst
->Texture
.Texture
) {
744 case TGSI_TEXTURE_1D
:
747 case TGSI_TEXTURE_2D
:
748 case TGSI_TEXTURE_RECT
:
751 case TGSI_TEXTURE_SHADOW1D
:
752 case TGSI_TEXTURE_SHADOW2D
:
753 case TGSI_TEXTURE_SHADOWRECT
:
754 case TGSI_TEXTURE_3D
:
755 case TGSI_TEXTURE_CUBE
:
763 if (modifier
== TEX_MODIFIER_LOD_BIAS
) {
764 lod_bias
= emit_fetch( bld
, inst
, 0, 3 );
767 else if (modifier
== TEX_MODIFIER_EXPLICIT_LOD
) {
769 explicit_lod
= emit_fetch( bld
, inst
, 0, 3 );
776 if (modifier
== TEX_MODIFIER_PROJECTED
) {
777 oow
= emit_fetch( bld
, inst
, 0, 3 );
778 oow
= lp_build_rcp(&bld
->base
, oow
);
781 for (i
= 0; i
< num_coords
; i
++) {
782 coords
[i
] = emit_fetch( bld
, inst
, 0, i
);
783 if (modifier
== TEX_MODIFIER_PROJECTED
)
784 coords
[i
] = lp_build_mul(&bld
->base
, coords
[i
], oow
);
786 for (i
= num_coords
; i
< 3; i
++) {
787 coords
[i
] = bld
->base
.undef
;
790 if (modifier
== TEX_MODIFIER_EXPLICIT_DERIV
) {
791 for (i
= 0; i
< num_coords
; i
++) {
792 ddx
[i
] = emit_fetch( bld
, inst
, 1, i
);
793 ddy
[i
] = emit_fetch( bld
, inst
, 2, i
);
795 unit
= inst
->Src
[3].Register
.Index
;
797 for (i
= 0; i
< num_coords
; i
++) {
798 ddx
[i
] = lp_build_ddx( &bld
->base
, coords
[i
] );
799 ddy
[i
] = lp_build_ddy( &bld
->base
, coords
[i
] );
801 unit
= inst
->Src
[1].Register
.Index
;
803 for (i
= num_coords
; i
< 3; i
++) {
804 ddx
[i
] = bld
->base
.undef
;
805 ddy
[i
] = bld
->base
.undef
;
808 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
811 unit
, num_coords
, coords
,
813 lod_bias
, explicit_lod
,
819 * Kill fragment if any of the src register values are negative.
823 struct lp_build_tgsi_soa_context
*bld
,
824 const struct tgsi_full_instruction
*inst
)
826 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
827 LLVMValueRef terms
[NUM_CHANNELS
];
831 memset(&terms
, 0, sizeof terms
);
833 FOR_EACH_CHANNEL( chan_index
) {
836 /* Unswizzle channel */
837 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
839 /* Check if the component has not been already tested. */
840 assert(swizzle
< NUM_CHANNELS
);
841 if( !terms
[swizzle
] )
842 /* TODO: change the comparison operator instead of setting the sign */
843 terms
[swizzle
] = emit_fetch(bld
, inst
, 0, chan_index
);
847 FOR_EACH_CHANNEL( chan_index
) {
848 if(terms
[chan_index
]) {
849 LLVMValueRef chan_mask
;
852 * If term < 0 then mask = 0 else mask = ~0.
854 chan_mask
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->base
.zero
);
857 mask
= LLVMBuildAnd(bld
->base
.builder
, mask
, chan_mask
, "");
864 lp_build_mask_update(bld
->mask
, mask
);
869 * Predicated fragment kill.
870 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
871 * The only predication is the execution mask which will apply if
872 * we're inside a loop or conditional.
875 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
876 const struct tgsi_full_instruction
*inst
)
880 /* For those channels which are "alive", disable fragment shader
883 if (bld
->exec_mask
.has_mask
) {
884 mask
= LLVMBuildNot(bld
->base
.builder
, bld
->exec_mask
.exec_mask
, "kilp");
887 mask
= bld
->base
.zero
;
890 lp_build_mask_update(bld
->mask
, mask
);
895 struct lp_build_tgsi_soa_context
*bld
,
896 const struct tgsi_full_declaration
*decl
)
898 LLVMTypeRef vec_type
= lp_build_vec_type(bld
->base
.type
);
900 unsigned first
= decl
->Range
.First
;
901 unsigned last
= decl
->Range
.Last
;
904 for (idx
= first
; idx
<= last
; ++idx
) {
905 switch (decl
->Declaration
.File
) {
906 case TGSI_FILE_TEMPORARY
:
907 assert(idx
< LP_MAX_TGSI_TEMPS
);
908 if (bld
->has_indirect_addressing
) {
909 LLVMValueRef array_size
= LLVMConstInt(LLVMInt32Type(),
911 bld
->temps_array
= lp_build_array_alloca(bld
->base
.builder
,
912 vec_type
, array_size
, "");
914 for (i
= 0; i
< NUM_CHANNELS
; i
++)
915 bld
->temps
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
920 case TGSI_FILE_OUTPUT
:
921 for (i
= 0; i
< NUM_CHANNELS
; i
++)
922 bld
->outputs
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
926 case TGSI_FILE_ADDRESS
:
927 assert(idx
< LP_MAX_TGSI_ADDRS
);
928 for (i
= 0; i
< NUM_CHANNELS
; i
++)
929 bld
->addr
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
933 case TGSI_FILE_PREDICATE
:
934 assert(idx
< LP_MAX_TGSI_PREDS
);
935 for (i
= 0; i
< NUM_CHANNELS
; i
++)
936 bld
->preds
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
941 /* don't need to declare other vars */
949 * Emit LLVM for one TGSI instruction.
950 * \param return TRUE for success, FALSE otherwise
954 struct lp_build_tgsi_soa_context
*bld
,
955 const struct tgsi_full_instruction
*inst
,
956 const struct tgsi_opcode_info
*info
,
960 LLVMValueRef src0
, src1
, src2
;
961 LLVMValueRef tmp0
, tmp1
, tmp2
;
962 LLVMValueRef tmp3
= NULL
;
963 LLVMValueRef tmp4
= NULL
;
964 LLVMValueRef tmp5
= NULL
;
965 LLVMValueRef tmp6
= NULL
;
966 LLVMValueRef tmp7
= NULL
;
968 LLVMValueRef dst0
[NUM_CHANNELS
];
971 * Stores and write masks are handled in a general fashion after the long
972 * instruction opcode switch statement.
974 * Although not stricitly necessary, we avoid generating instructions for
975 * channels which won't be stored, in cases where's that easy. For some
976 * complex instructions, like texture sampling, it is more convenient to
977 * assume a full writemask and then let LLVM optimization passes eliminate
983 assert(info
->num_dst
<= 1);
985 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
986 dst0
[chan_index
] = bld
->base
.undef
;
990 switch (inst
->Instruction
.Opcode
) {
991 case TGSI_OPCODE_ARL
:
992 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
993 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
994 tmp0
= lp_build_floor(&bld
->base
, tmp0
);
995 dst0
[chan_index
] = tmp0
;
999 case TGSI_OPCODE_MOV
:
1000 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1001 dst0
[chan_index
] = emit_fetch( bld
, inst
, 0, chan_index
);
1005 case TGSI_OPCODE_LIT
:
1006 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ) {
1007 dst0
[CHAN_X
] = bld
->base
.one
;
1009 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1010 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1011 dst0
[CHAN_Y
] = lp_build_max( &bld
->base
, src0
, bld
->base
.zero
);
1013 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1014 /* XMM[1] = SrcReg[0].yyyy */
1015 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1016 /* XMM[1] = max(XMM[1], 0) */
1017 tmp1
= lp_build_max( &bld
->base
, tmp1
, bld
->base
.zero
);
1018 /* XMM[2] = SrcReg[0].wwww */
1019 tmp2
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1020 tmp1
= lp_build_pow( &bld
->base
, tmp1
, tmp2
);
1021 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1022 tmp2
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GREATER
, tmp0
, bld
->base
.zero
);
1023 dst0
[CHAN_Z
] = lp_build_select(&bld
->base
, tmp2
, tmp1
, bld
->base
.zero
);
1025 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) ) {
1026 dst0
[CHAN_W
] = bld
->base
.one
;
1030 case TGSI_OPCODE_RCP
:
1031 /* TGSI_OPCODE_RECIP */
1032 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1033 res
= lp_build_rcp(&bld
->base
, src0
);
1034 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1035 dst0
[chan_index
] = res
;
1039 case TGSI_OPCODE_RSQ
:
1040 /* TGSI_OPCODE_RECIPSQRT */
1041 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1042 src0
= lp_build_abs(&bld
->base
, src0
);
1043 res
= lp_build_rsqrt(&bld
->base
, src0
);
1044 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1045 dst0
[chan_index
] = res
;
1049 case TGSI_OPCODE_EXP
:
1050 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1051 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1052 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1053 LLVMValueRef
*p_exp2_int_part
= NULL
;
1054 LLVMValueRef
*p_frac_part
= NULL
;
1055 LLVMValueRef
*p_exp2
= NULL
;
1057 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1059 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1060 p_exp2_int_part
= &tmp0
;
1061 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1062 p_frac_part
= &tmp1
;
1063 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1066 lp_build_exp2_approx(&bld
->base
, src0
, p_exp2_int_part
, p_frac_part
, p_exp2
);
1068 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1069 dst0
[CHAN_X
] = tmp0
;
1070 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1071 dst0
[CHAN_Y
] = tmp1
;
1072 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1073 dst0
[CHAN_Z
] = tmp2
;
1076 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1077 dst0
[CHAN_W
] = bld
->base
.one
;
1081 case TGSI_OPCODE_LOG
:
1082 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1083 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1084 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1085 LLVMValueRef
*p_floor_log2
= NULL
;
1086 LLVMValueRef
*p_exp
= NULL
;
1087 LLVMValueRef
*p_log2
= NULL
;
1089 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1090 src0
= lp_build_abs( &bld
->base
, src0
);
1092 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1093 p_floor_log2
= &tmp0
;
1094 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1096 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1099 lp_build_log2_approx(&bld
->base
, src0
, p_exp
, p_floor_log2
, p_log2
);
1101 /* dst.x = floor(lg2(abs(src.x))) */
1102 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1103 dst0
[CHAN_X
] = tmp0
;
1104 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1105 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
)) {
1106 dst0
[CHAN_Y
] = lp_build_div( &bld
->base
, src0
, tmp1
);
1108 /* dst.z = lg2(abs(src.x)) */
1109 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1110 dst0
[CHAN_Z
] = tmp2
;
1113 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1114 dst0
[CHAN_W
] = bld
->base
.one
;
1118 case TGSI_OPCODE_MUL
:
1119 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1120 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1121 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1122 dst0
[chan_index
] = lp_build_mul(&bld
->base
, src0
, src1
);
1126 case TGSI_OPCODE_ADD
:
1127 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1128 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1129 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1130 dst0
[chan_index
] = lp_build_add(&bld
->base
, src0
, src1
);
1134 case TGSI_OPCODE_DP3
:
1135 /* TGSI_OPCODE_DOT3 */
1136 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1137 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1138 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1139 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1140 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1141 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1142 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1143 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1144 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1145 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1146 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1147 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1148 dst0
[chan_index
] = tmp0
;
1152 case TGSI_OPCODE_DP4
:
1153 /* TGSI_OPCODE_DOT4 */
1154 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1155 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1156 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1157 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1158 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1159 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1160 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1161 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1162 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1163 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1164 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1165 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1166 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1167 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1168 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1169 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1170 dst0
[chan_index
] = tmp0
;
1174 case TGSI_OPCODE_DST
:
1175 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1176 dst0
[CHAN_X
] = bld
->base
.one
;
1178 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1179 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1180 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1181 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1183 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1184 dst0
[CHAN_Z
] = emit_fetch( bld
, inst
, 0, CHAN_Z
);
1186 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1187 dst0
[CHAN_W
] = emit_fetch( bld
, inst
, 1, CHAN_W
);
1191 case TGSI_OPCODE_MIN
:
1192 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1193 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1194 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1195 dst0
[chan_index
] = lp_build_min( &bld
->base
, src0
, src1
);
1199 case TGSI_OPCODE_MAX
:
1200 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1201 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1202 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1203 dst0
[chan_index
] = lp_build_max( &bld
->base
, src0
, src1
);
1207 case TGSI_OPCODE_SLT
:
1208 /* TGSI_OPCODE_SETLT */
1209 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1210 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1211 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1212 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, src1
);
1213 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1217 case TGSI_OPCODE_SGE
:
1218 /* TGSI_OPCODE_SETGE */
1219 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1220 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1221 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1222 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GEQUAL
, src0
, src1
);
1223 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1227 case TGSI_OPCODE_MAD
:
1228 /* TGSI_OPCODE_MADD */
1229 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1230 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1231 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1232 tmp2
= emit_fetch( bld
, inst
, 2, chan_index
);
1233 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1234 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp2
);
1235 dst0
[chan_index
] = tmp0
;
1239 case TGSI_OPCODE_SUB
:
1240 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1241 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1242 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1243 dst0
[chan_index
] = lp_build_sub( &bld
->base
, tmp0
, tmp1
);
1247 case TGSI_OPCODE_LRP
:
1248 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1249 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1250 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1251 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1252 tmp0
= lp_build_sub( &bld
->base
, src1
, src2
);
1253 tmp0
= lp_build_mul( &bld
->base
, src0
, tmp0
);
1254 dst0
[chan_index
] = lp_build_add( &bld
->base
, tmp0
, src2
);
1258 case TGSI_OPCODE_CND
:
1259 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1260 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1261 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1262 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1263 tmp1
= lp_build_const_vec(bld
->base
.type
, 0.5);
1264 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src2
, tmp1
);
1265 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src0
, src1
);
1269 case TGSI_OPCODE_DP2A
:
1270 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1271 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1272 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1273 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1274 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1275 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1276 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1277 tmp1
= emit_fetch( bld
, inst
, 2, CHAN_X
); /* xmm1 = src[2].x */
1278 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1279 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1280 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1284 case TGSI_OPCODE_FRC
:
1285 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1286 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1287 tmp0
= lp_build_floor(&bld
->base
, src0
);
1288 tmp0
= lp_build_sub(&bld
->base
, src0
, tmp0
);
1289 dst0
[chan_index
] = tmp0
;
1293 case TGSI_OPCODE_CLAMP
:
1294 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1295 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1296 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1297 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1298 tmp0
= lp_build_max(&bld
->base
, tmp0
, src1
);
1299 tmp0
= lp_build_min(&bld
->base
, tmp0
, src2
);
1300 dst0
[chan_index
] = tmp0
;
1304 case TGSI_OPCODE_FLR
:
1305 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1306 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1307 dst0
[chan_index
] = lp_build_floor(&bld
->base
, tmp0
);
1311 case TGSI_OPCODE_ROUND
:
1312 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1313 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1314 dst0
[chan_index
] = lp_build_round(&bld
->base
, tmp0
);
1318 case TGSI_OPCODE_EX2
: {
1319 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1320 tmp0
= lp_build_exp2( &bld
->base
, tmp0
);
1321 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1322 dst0
[chan_index
] = tmp0
;
1327 case TGSI_OPCODE_LG2
:
1328 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1329 tmp0
= lp_build_log2( &bld
->base
, tmp0
);
1330 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1331 dst0
[chan_index
] = tmp0
;
1335 case TGSI_OPCODE_POW
:
1336 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1337 src1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1338 res
= lp_build_pow( &bld
->base
, src0
, src1
);
1339 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1340 dst0
[chan_index
] = res
;
1344 case TGSI_OPCODE_XPD
:
1345 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1346 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1347 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1348 tmp3
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1350 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1351 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1352 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1353 tmp4
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1355 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1357 tmp2
= lp_build_mul( &bld
->base
, tmp2
, tmp1
);
1359 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1360 tmp2
= lp_build_sub( &bld
->base
, tmp2
, tmp5
);
1361 dst0
[CHAN_X
] = tmp2
;
1363 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1364 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1365 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1366 tmp5
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1368 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1369 tmp3
= lp_build_mul( &bld
->base
, tmp3
, tmp2
);
1370 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp5
);
1371 tmp3
= lp_build_sub( &bld
->base
, tmp3
, tmp1
);
1372 dst0
[CHAN_Y
] = tmp3
;
1374 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1375 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1376 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp2
);
1377 tmp5
= lp_build_sub( &bld
->base
, tmp5
, tmp0
);
1378 dst0
[CHAN_Z
] = tmp5
;
1380 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1381 dst0
[CHAN_W
] = bld
->base
.one
;
1385 case TGSI_OPCODE_ABS
:
1386 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1387 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1388 dst0
[chan_index
] = lp_build_abs( &bld
->base
, tmp0
);
1392 case TGSI_OPCODE_RCC
:
1397 case TGSI_OPCODE_DPH
:
1398 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1399 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1400 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1401 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1402 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1403 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1404 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1405 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1406 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1407 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1408 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1409 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1410 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1411 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1412 dst0
[chan_index
] = tmp0
;
1416 case TGSI_OPCODE_COS
:
1417 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1418 tmp0
= lp_build_cos( &bld
->base
, tmp0
);
1419 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1420 dst0
[chan_index
] = tmp0
;
1424 case TGSI_OPCODE_DDX
:
1425 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1426 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, &dst0
[chan_index
], NULL
);
1430 case TGSI_OPCODE_DDY
:
1431 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1432 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, NULL
, &dst0
[chan_index
]);
1436 case TGSI_OPCODE_KILP
:
1437 /* predicated kill */
1438 emit_kilp( bld
, inst
);
1441 case TGSI_OPCODE_KIL
:
1442 /* conditional kill */
1443 emit_kil( bld
, inst
);
1446 case TGSI_OPCODE_PK2H
:
1450 case TGSI_OPCODE_PK2US
:
1454 case TGSI_OPCODE_PK4B
:
1458 case TGSI_OPCODE_PK4UB
:
1462 case TGSI_OPCODE_RFL
:
1466 case TGSI_OPCODE_SEQ
:
1467 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1468 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1469 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1470 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_EQUAL
, src0
, src1
);
1471 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1475 case TGSI_OPCODE_SFL
:
1476 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1477 dst0
[chan_index
] = bld
->base
.zero
;
1481 case TGSI_OPCODE_SGT
:
1482 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1483 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1484 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1485 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src0
, src1
);
1486 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1490 case TGSI_OPCODE_SIN
:
1491 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1492 tmp0
= lp_build_sin( &bld
->base
, tmp0
);
1493 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1494 dst0
[chan_index
] = tmp0
;
1498 case TGSI_OPCODE_SLE
:
1499 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1500 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1501 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1502 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LEQUAL
, src0
, src1
);
1503 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1507 case TGSI_OPCODE_SNE
:
1508 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1509 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1510 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1511 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_NOTEQUAL
, src0
, src1
);
1512 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1516 case TGSI_OPCODE_STR
:
1517 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1518 dst0
[chan_index
] = bld
->base
.one
;
1522 case TGSI_OPCODE_TEX
:
1523 emit_tex( bld
, inst
, TEX_MODIFIER_NONE
, dst0
);
1526 case TGSI_OPCODE_TXD
:
1527 emit_tex( bld
, inst
, TEX_MODIFIER_EXPLICIT_DERIV
, dst0
);
1530 case TGSI_OPCODE_UP2H
:
1536 case TGSI_OPCODE_UP2US
:
1542 case TGSI_OPCODE_UP4B
:
1548 case TGSI_OPCODE_UP4UB
:
1554 case TGSI_OPCODE_X2D
:
1560 case TGSI_OPCODE_ARA
:
1566 case TGSI_OPCODE_ARR
:
1567 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1568 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1569 tmp0
= lp_build_round(&bld
->base
, tmp0
);
1570 dst0
[chan_index
] = tmp0
;
1574 case TGSI_OPCODE_BRA
:
1580 case TGSI_OPCODE_CAL
:
1581 lp_exec_mask_call(&bld
->exec_mask
,
1587 case TGSI_OPCODE_RET
:
1588 lp_exec_mask_ret(&bld
->exec_mask
, pc
);
1591 case TGSI_OPCODE_END
:
1595 case TGSI_OPCODE_SSG
:
1596 /* TGSI_OPCODE_SGN */
1597 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1598 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1599 dst0
[chan_index
] = lp_build_sgn( &bld
->base
, tmp0
);
1603 case TGSI_OPCODE_CMP
:
1604 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1605 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1606 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1607 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1608 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, bld
->base
.zero
);
1609 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src1
, src2
);
1613 case TGSI_OPCODE_SCS
:
1614 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1615 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1616 dst0
[CHAN_X
] = lp_build_cos( &bld
->base
, tmp0
);
1618 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1619 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1620 dst0
[CHAN_Y
] = lp_build_sin( &bld
->base
, tmp0
);
1622 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1623 dst0
[CHAN_Z
] = bld
->base
.zero
;
1625 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1626 dst0
[CHAN_W
] = bld
->base
.one
;
1630 case TGSI_OPCODE_TXB
:
1631 emit_tex( bld
, inst
, TEX_MODIFIER_LOD_BIAS
, dst0
);
1634 case TGSI_OPCODE_NRM
:
1636 case TGSI_OPCODE_NRM4
:
1637 /* 3 or 4-component normalization */
1639 uint dims
= (inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
1641 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) ||
1642 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
) ||
1643 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
) ||
1644 (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 4)) {
1646 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1649 /* xmm0 = src.x * src.x */
1650 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1651 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1654 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp0
);
1657 /* xmm0 = xmm0 + src.y * src.y */
1658 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Y
);
1659 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1662 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1663 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1666 /* xmm0 = xmm0 + src.z * src.z */
1667 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Z
);
1668 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1671 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1672 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1676 /* xmm0 = xmm0 + src.w * src.w */
1677 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_W
);
1678 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
)) {
1681 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1682 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1685 /* xmm1 = 1 / sqrt(xmm0) */
1686 tmp1
= lp_build_rsqrt( &bld
->base
, tmp0
);
1688 /* dst.x = xmm1 * src.x */
1689 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1690 dst0
[CHAN_X
] = lp_build_mul( &bld
->base
, tmp4
, tmp1
);
1693 /* dst.y = xmm1 * src.y */
1694 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1695 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp5
, tmp1
);
1698 /* dst.z = xmm1 * src.z */
1699 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1700 dst0
[CHAN_Z
] = lp_build_mul( &bld
->base
, tmp6
, tmp1
);
1703 /* dst.w = xmm1 * src.w */
1704 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) && dims
== 4) {
1705 dst0
[CHAN_W
] = lp_build_mul( &bld
->base
, tmp7
, tmp1
);
1710 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 3) {
1711 dst0
[CHAN_W
] = bld
->base
.one
;
1716 case TGSI_OPCODE_DIV
:
1722 case TGSI_OPCODE_DP2
:
1723 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1724 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1725 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1726 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1727 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1728 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1729 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1730 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1731 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1735 case TGSI_OPCODE_TXL
:
1736 emit_tex( bld
, inst
, TEX_MODIFIER_EXPLICIT_LOD
, dst0
);
1739 case TGSI_OPCODE_TXP
:
1740 emit_tex( bld
, inst
, TEX_MODIFIER_PROJECTED
, dst0
);
1743 case TGSI_OPCODE_BRK
:
1744 lp_exec_break(&bld
->exec_mask
);
1747 case TGSI_OPCODE_IF
:
1748 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1749 tmp0
= lp_build_cmp(&bld
->base
, PIPE_FUNC_NOTEQUAL
,
1750 tmp0
, bld
->base
.zero
);
1751 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp0
);
1754 case TGSI_OPCODE_BGNLOOP
:
1755 lp_exec_bgnloop(&bld
->exec_mask
);
1758 case TGSI_OPCODE_BGNSUB
:
1759 lp_exec_mask_bgnsub(&bld
->exec_mask
);
1762 case TGSI_OPCODE_ELSE
:
1763 lp_exec_mask_cond_invert(&bld
->exec_mask
);
1766 case TGSI_OPCODE_ENDIF
:
1767 lp_exec_mask_cond_pop(&bld
->exec_mask
);
1770 case TGSI_OPCODE_ENDLOOP
:
1771 lp_exec_endloop(&bld
->exec_mask
);
1774 case TGSI_OPCODE_ENDSUB
:
1775 lp_exec_mask_endsub(&bld
->exec_mask
, pc
);
1778 case TGSI_OPCODE_PUSHA
:
1784 case TGSI_OPCODE_POPA
:
1790 case TGSI_OPCODE_CEIL
:
1791 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1792 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1793 dst0
[chan_index
] = lp_build_ceil(&bld
->base
, tmp0
);
1797 case TGSI_OPCODE_I2F
:
1803 case TGSI_OPCODE_NOT
:
1809 case TGSI_OPCODE_TRUNC
:
1810 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1811 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1812 dst0
[chan_index
] = lp_build_trunc(&bld
->base
, tmp0
);
1816 case TGSI_OPCODE_SHL
:
1822 case TGSI_OPCODE_ISHR
:
1828 case TGSI_OPCODE_AND
:
1834 case TGSI_OPCODE_OR
:
1840 case TGSI_OPCODE_MOD
:
1846 case TGSI_OPCODE_XOR
:
1852 case TGSI_OPCODE_SAD
:
1858 case TGSI_OPCODE_TXF
:
1864 case TGSI_OPCODE_TXQ
:
1870 case TGSI_OPCODE_CONT
:
1871 lp_exec_continue(&bld
->exec_mask
);
1874 case TGSI_OPCODE_EMIT
:
1878 case TGSI_OPCODE_ENDPRIM
:
1882 case TGSI_OPCODE_NOP
:
1890 LLVMValueRef pred
[NUM_CHANNELS
];
1892 emit_fetch_predicate( bld
, inst
, pred
);
1894 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1895 emit_store( bld
, inst
, 0, chan_index
, pred
[chan_index
], dst0
[chan_index
]);
1904 lp_build_tgsi_soa(LLVMBuilderRef builder
,
1905 const struct tgsi_token
*tokens
,
1906 struct lp_type type
,
1907 struct lp_build_mask_context
*mask
,
1908 LLVMValueRef consts_ptr
,
1909 const LLVMValueRef
*pos
,
1910 const LLVMValueRef (*inputs
)[NUM_CHANNELS
],
1911 LLVMValueRef (*outputs
)[NUM_CHANNELS
],
1912 struct lp_build_sampler_soa
*sampler
,
1913 const struct tgsi_shader_info
*info
)
1915 struct lp_build_tgsi_soa_context bld
;
1916 struct tgsi_parse_context parse
;
1917 uint num_immediates
= 0;
1918 uint num_instructions
= 0;
1922 /* Setup build context */
1923 memset(&bld
, 0, sizeof bld
);
1924 lp_build_context_init(&bld
.base
, builder
, type
);
1925 lp_build_context_init(&bld
.int_bld
, builder
, lp_int_type(type
));
1928 bld
.inputs
= inputs
;
1929 bld
.outputs
= outputs
;
1930 bld
.consts_ptr
= consts_ptr
;
1931 bld
.sampler
= sampler
;
1932 bld
.has_indirect_addressing
= info
->opcode_count
[TGSI_OPCODE_ARR
] > 0 ||
1933 info
->opcode_count
[TGSI_OPCODE_ARL
] > 0;
1934 bld
.instructions
= (struct tgsi_full_instruction
*)
1935 MALLOC( LP_MAX_INSTRUCTIONS
* sizeof(struct tgsi_full_instruction
) );
1936 bld
.max_instructions
= LP_MAX_INSTRUCTIONS
;
1938 if (!bld
.instructions
) {
1942 lp_exec_mask_init(&bld
.exec_mask
, &bld
.base
);
1944 tgsi_parse_init( &parse
, tokens
);
1946 while( !tgsi_parse_end_of_tokens( &parse
) ) {
1947 tgsi_parse_token( &parse
);
1949 switch( parse
.FullToken
.Token
.Type
) {
1950 case TGSI_TOKEN_TYPE_DECLARATION
:
1951 /* Inputs already interpolated */
1952 emit_declaration( &bld
, &parse
.FullToken
.FullDeclaration
);
1955 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1957 /* save expanded instruction */
1958 if (num_instructions
== bld
.max_instructions
) {
1959 bld
.instructions
= REALLOC(bld
.instructions
,
1960 bld
.max_instructions
1961 * sizeof(struct tgsi_full_instruction
),
1962 (bld
.max_instructions
+ LP_MAX_INSTRUCTIONS
)
1963 * sizeof(struct tgsi_full_instruction
));
1964 bld
.max_instructions
+= LP_MAX_INSTRUCTIONS
;
1967 memcpy(bld
.instructions
+ num_instructions
,
1968 &parse
.FullToken
.FullInstruction
,
1969 sizeof(bld
.instructions
[0]));
1976 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1977 /* simply copy the immediate values into the next immediates[] slot */
1979 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
1981 assert(num_immediates
< LP_MAX_TGSI_IMMEDIATES
);
1982 for( i
= 0; i
< size
; ++i
)
1983 bld
.immediates
[num_immediates
][i
] =
1984 lp_build_const_vec(type
, parse
.FullToken
.FullImmediate
.u
[i
].Float
);
1985 for( i
= size
; i
< 4; ++i
)
1986 bld
.immediates
[num_immediates
][i
] = bld
.base
.undef
;
1991 case TGSI_TOKEN_TYPE_PROPERTY
:
2000 struct tgsi_full_instruction
*instr
= bld
.instructions
+ pc
;
2001 const struct tgsi_opcode_info
*opcode_info
=
2002 tgsi_get_opcode_info(instr
->Instruction
.Opcode
);
2003 if (!emit_instruction( &bld
, instr
, opcode_info
, &pc
))
2004 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2005 opcode_info
->mnemonic
);
2009 LLVMBasicBlockRef block
= LLVMGetInsertBlock(builder
);
2010 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
2011 debug_printf("11111111111111111111111111111 \n");
2012 tgsi_dump(tokens
, 0);
2013 lp_debug_dump_value(function
);
2014 debug_printf("2222222222222222222222222222 \n");
2016 tgsi_parse_free( &parse
);
2019 LLVMModuleRef module
= LLVMGetGlobalParent(
2020 LLVMGetBasicBlockParent(LLVMGetInsertBlock(bld
.base
.builder
)));
2021 LLVMDumpModule(module
);
2025 FREE( bld
.instructions
);