1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_exec.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_type.h"
51 #include "lp_bld_const.h"
52 #include "lp_bld_arit.h"
53 #include "lp_bld_logic.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_tgsi.h"
57 #include "lp_bld_limits.h"
58 #include "lp_bld_debug.h"
61 #define FOR_EACH_CHANNEL( CHAN )\
62 for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
64 #define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
65 ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))
67 #define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
68 if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
70 #define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
71 FOR_EACH_CHANNEL( CHAN )\
72 IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
79 #define QUAD_TOP_LEFT 0
80 #define QUAD_TOP_RIGHT 1
81 #define QUAD_BOTTOM_LEFT 2
82 #define QUAD_BOTTOM_RIGHT 3
86 struct lp_build_context
*bld
;
90 LLVMTypeRef int_vec_type
;
92 LLVMValueRef cond_stack
[LP_MAX_TGSI_NESTING
];
94 LLVMValueRef cond_mask
;
96 LLVMBasicBlockRef loop_block
;
97 LLVMValueRef cont_mask
;
98 LLVMValueRef break_mask
;
99 LLVMValueRef break_var
;
101 LLVMBasicBlockRef loop_block
;
102 LLVMValueRef cont_mask
;
103 LLVMValueRef break_mask
;
104 LLVMValueRef break_var
;
105 } loop_stack
[LP_MAX_TGSI_NESTING
];
108 LLVMValueRef exec_mask
;
111 struct lp_build_tgsi_soa_context
113 struct lp_build_context base
;
115 /* Builder for integer masks and indices */
116 struct lp_build_context int_bld
;
118 LLVMValueRef consts_ptr
;
119 const LLVMValueRef
*pos
;
120 const LLVMValueRef (*inputs
)[NUM_CHANNELS
];
121 LLVMValueRef (*outputs
)[NUM_CHANNELS
];
123 const struct lp_build_sampler_soa
*sampler
;
125 LLVMValueRef immediates
[LP_MAX_TGSI_IMMEDIATES
][NUM_CHANNELS
];
126 LLVMValueRef temps
[LP_MAX_TGSI_TEMPS
][NUM_CHANNELS
];
127 LLVMValueRef addr
[LP_MAX_TGSI_ADDRS
][NUM_CHANNELS
];
128 LLVMValueRef preds
[LP_MAX_TGSI_PREDS
][NUM_CHANNELS
];
130 /* we allocate an array of temps if we have indirect
131 * addressing and then the temps above is unused */
132 LLVMValueRef temps_array
;
133 boolean has_indirect_addressing
;
135 struct lp_build_mask_context
*mask
;
136 struct lp_exec_mask exec_mask
;
139 static const unsigned char
141 QUAD_TOP_LEFT
, QUAD_TOP_LEFT
,
142 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_LEFT
145 static const unsigned char
147 QUAD_TOP_RIGHT
, QUAD_TOP_RIGHT
,
148 QUAD_BOTTOM_RIGHT
, QUAD_BOTTOM_RIGHT
151 static const unsigned char
153 QUAD_TOP_LEFT
, QUAD_TOP_RIGHT
,
154 QUAD_TOP_LEFT
, QUAD_TOP_RIGHT
157 static const unsigned char
158 swizzle_bottom
[4] = {
159 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_RIGHT
,
160 QUAD_BOTTOM_LEFT
, QUAD_BOTTOM_RIGHT
/*
 * Initialize the execution-mask bookkeeping: no masking in effect yet,
 * empty condition/loop nesting stacks, and all-ones (all channels active)
 * break/cont/cond masks.
 * NOTE(review): this extraction is fragmented and some original lines are
 * missing -- e.g. an apparent `mask->bld = bld;` assignment implied by the
 * later uses of mask->bld. Confirm against the upstream file.
 */
163 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
/* start with no masking active */
166 mask
->has_mask
= FALSE
;
/* both nesting stacks start empty */
167 mask
->cond_stack_size
= 0;
168 mask
->loop_stack_size
= 0;
/* integer vector type matching the FP vector shape; masks live in it */
170 mask
->int_vec_type
= lp_build_int_vec_type(mask
->bld
->type
);
/* all-ones == every channel enabled */
171 mask
->break_mask
= mask
->cont_mask
= mask
->cond_mask
=
172 LLVMConstAllOnes(mask
->int_vec_type
);
/*
 * Recompute the combined exec_mask after any of the component masks changed.
 * Inside a loop the runtime mask is an AND of the cond/cont/break masks
 * (the exact LLVMBuildAnd operands are missing from this extraction --
 * TODO confirm against upstream); outside loops only cond_mask applies.
 */
175 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
177 if (mask
->loop_stack_size
) {
178 /*for loops we need to update the entire mask at runtime */
180 assert(mask
->break_mask
);
/* first AND step (operands truncated in this extraction) */
181 tmp
= LLVMBuildAnd(mask
->bld
->builder
,
/* final combined execution mask */
185 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
/* no loop active: the condition mask alone governs execution */
190 mask
->exec_mask
= mask
->cond_mask
;
/* masking is in effect iff any conditional or loop nesting is open */
193 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
194 mask
->loop_stack_size
> 0);
/*
 * Enter an IF: push the current cond_mask onto the stack and replace it
 * with the new per-channel condition value `val`, then refresh exec_mask.
 */
197 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
/* guard against exceeding the supported conditional nesting depth */
200 assert(mask
->cond_stack_size
< LP_MAX_TGSI_NESTING
);
/* at top level the cond mask must still be the all-ones identity */
201 if (mask
->cond_stack_size
== 0) {
202 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
/* save current mask so ELSE/ENDIF can restore/invert it */
204 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
/* condition values must already be in the integer mask vector type */
205 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
206 mask
->cond_mask
= val
;
208 lp_exec_mask_update(mask
);
/*
 * Handle ELSE: invert the current condition mask and re-AND it with the
 * enclosing (previous) mask from the stack, then refresh exec_mask.
 * NOTE(review): the second LLVMBuildAnd operand line is missing from this
 * extraction -- presumably prev_mask/inv_mask; confirm upstream.
 */
211 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
213 LLVMValueRef prev_mask
;
214 LLVMValueRef inv_mask
;
216 assert(mask
->cond_stack_size
);
/* mask that was active before the matching IF */
217 prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
/* at the outermost level the saved mask must be all-ones */
218 if (mask
->cond_stack_size
== 1) {
219 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
/* channels NOT taken by the IF branch */
222 inv_mask
= LLVMBuildNot(mask
->bld
->builder
, mask
->cond_mask
, "");
/* restrict to channels active in the enclosing scope */
224 mask
->cond_mask
= LLVMBuildAnd(mask
->bld
->builder
,
227 lp_exec_mask_update(mask
);
/*
 * Handle ENDIF: restore the condition mask saved by the matching
 * lp_exec_mask_cond_push and refresh the combined exec_mask.
 */
230 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
232 assert(mask
->cond_stack_size
);
/* pop the saved mask */
233 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
234 lp_exec_mask_update(mask
);
237 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
239 if (mask
->loop_stack_size
== 0) {
240 assert(mask
->loop_block
== NULL
);
241 assert(mask
->cont_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
242 assert(mask
->break_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
243 assert(mask
->break_var
== NULL
);
246 assert(mask
->loop_stack_size
< LP_MAX_TGSI_NESTING
);
248 mask
->loop_stack
[mask
->loop_stack_size
].loop_block
= mask
->loop_block
;
249 mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
250 mask
->loop_stack
[mask
->loop_stack_size
].break_mask
= mask
->break_mask
;
251 mask
->loop_stack
[mask
->loop_stack_size
].break_var
= mask
->break_var
;
252 ++mask
->loop_stack_size
;
254 mask
->break_var
= lp_build_alloca(mask
->bld
->builder
, mask
->int_vec_type
, "");
255 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
257 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->builder
, "bgnloop");
258 LLVMBuildBr(mask
->bld
->builder
, mask
->loop_block
);
259 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, mask
->loop_block
);
261 mask
->break_mask
= LLVMBuildLoad(mask
->bld
->builder
, mask
->break_var
, "");
263 lp_exec_mask_update(mask
);
/*
 * Handle BRK inside a loop: channels currently executing get cleared from
 * break_mask (break_mask &= ~exec_mask), so they stay disabled until the
 * loop exits. The NOT's source operand line is missing from this
 * extraction -- presumably mask->exec_mask; confirm upstream.
 */
266 static void lp_exec_break(struct lp_exec_mask
*mask
)
/* complement of the channels that hit the break */
268 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
/* knock the breaking channels out of the loop's break mask */
272 mask
->break_mask
= LLVMBuildAnd(mask
->bld
->builder
,
274 exec_mask
, "break_full");
276 lp_exec_mask_update(mask
);
/*
 * Handle CONT inside a loop: analogous to lp_exec_break, but the executing
 * channels are removed from cont_mask (re-enabled at the next iteration).
 * Several operand lines are missing from this extraction -- confirm
 * against upstream.
 */
279 static void lp_exec_continue(struct lp_exec_mask
*mask
)
/* complement of the channels that hit the continue */
281 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
/* disable those channels for the remainder of this iteration */
285 mask
->cont_mask
= LLVMBuildAnd(mask
->bld
->builder
,
289 lp_exec_mask_update(mask
);
293 static void lp_exec_endloop(struct lp_exec_mask
*mask
)
295 LLVMBasicBlockRef endloop
;
296 LLVMTypeRef reg_type
= LLVMIntType(mask
->bld
->type
.width
*
297 mask
->bld
->type
.length
);
300 assert(mask
->break_mask
);
303 * Restore the cont_mask, but don't pop
305 assert(mask
->loop_stack_size
);
306 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
- 1].cont_mask
;
307 lp_exec_mask_update(mask
);
310 * Unlike the continue mask, the break_mask must be preserved across loop
313 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
315 /* i1cond = (mask == 0) */
316 i1cond
= LLVMBuildICmp(
319 LLVMBuildBitCast(mask
->bld
->builder
, mask
->exec_mask
, reg_type
, ""),
320 LLVMConstNull(reg_type
), "");
322 endloop
= lp_build_insert_new_block(mask
->bld
->builder
, "endloop");
324 LLVMBuildCondBr(mask
->bld
->builder
,
325 i1cond
, mask
->loop_block
, endloop
);
327 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, endloop
);
329 assert(mask
->loop_stack_size
);
330 --mask
->loop_stack_size
;
331 mask
->loop_block
= mask
->loop_stack
[mask
->loop_stack_size
].loop_block
;
332 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
;
333 mask
->break_mask
= mask
->loop_stack
[mask
->loop_stack_size
].break_mask
;
334 mask
->break_var
= mask
->loop_stack
[mask
->loop_stack_size
].break_var
;
336 lp_exec_mask_update(mask
);
339 /* stores val into an address pointed to by dst.
340 * mask->exec_mask is used to figure out which bits of val
341 * should be stored into the address
342 * (0 means don't store this bit, 1 means do store).
344 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
349 /* Mix the predicate and execution mask */
350 if (mask
->has_mask
) {
/* both a predicate and an exec mask: combine them */
352 pred
= LLVMBuildAnd(mask
->bld
->builder
, pred
, mask
->exec_mask
, "");
/* no explicit predicate supplied: use the exec mask alone */
354 pred
= mask
->exec_mask
;
359 LLVMValueRef real_val
, dst_val
;
/* masked store: load the old value, select per channel, store back */
361 dst_val
= LLVMBuildLoad(mask
->bld
->builder
, dst
, "");
/* select() operand lines are missing from this extraction --
 * presumably pred ? val : dst_val; confirm upstream */
362 real_val
= lp_build_select(mask
->bld
,
366 LLVMBuildStore(mask
->bld
->builder
, real_val
, dst
);
/* unmasked path: plain store of val */
368 LLVMBuildStore(mask
->bld
->builder
, val
, dst
);
/*
 * Approximate the derivative of `src` along x within each 2x2 quad:
 * broadcast the left/right columns via swizzles and return right - left.
 */
373 emit_ddx(struct lp_build_tgsi_soa_context
*bld
,
376 LLVMValueRef src_left
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_left
);
377 LLVMValueRef src_right
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_right
);
378 return lp_build_sub(&bld
->base
, src_right
, src_left
);
/*
 * Approximate the derivative of `src` along y within each 2x2 quad:
 * broadcast the top/bottom rows via swizzles and return top - bottom.
 */
383 emit_ddy(struct lp_build_tgsi_soa_context
*bld
,
386 LLVMValueRef src_top
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_top
);
387 LLVMValueRef src_bottom
= lp_build_swizzle1_aos(&bld
->base
, src
, swizzle_bottom
);
388 return lp_build_sub(&bld
->base
, src_top
, src_bottom
);
/*
 * Return a pointer to temporary register (index, chan). Without indirect
 * addressing this is the per-channel alloca in bld->temps; with it, compute
 * a linear index (index*4 + chan [+ addr]) into the flat temps_array and GEP.
 */
392 get_temp_ptr(struct lp_build_tgsi_soa_context
*bld
,
399 if (!bld
->has_indirect_addressing
) {
/* direct case: dedicated alloca per (reg, channel) */
400 return bld
->temps
[index
][chan
];
/* linearize: 4 channels per register */
402 LLVMValueRef lindex
=
403 LLVMConstInt(LLVMInt32Type(), index
* 4 + chan
, 0);
/* apply the runtime address-register offset */
405 lindex
= lp_build_add(&bld
->base
, lindex
, addr
);
406 return LLVMBuildGEP(bld
->base
.builder
, bld
->temps_array
, &lindex
, 1, "");
415 struct lp_build_tgsi_soa_context
*bld
,
416 const struct tgsi_full_instruction
*inst
,
418 const unsigned chan_index
)
420 const struct tgsi_full_src_register
*reg
= &inst
->Src
[index
];
421 const unsigned swizzle
=
422 tgsi_util_get_full_src_register_swizzle(reg
, chan_index
);
424 LLVMValueRef addr
= NULL
;
427 assert(0 && "invalid swizzle in emit_fetch()");
428 return bld
->base
.undef
;
431 if (reg
->Register
.Indirect
) {
432 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->base
.type
);
433 unsigned swizzle
= tgsi_util_get_src_register_swizzle( ®
->Indirect
, chan_index
);
434 addr
= LLVMBuildLoad(bld
->base
.builder
,
435 bld
->addr
[reg
->Indirect
.Index
][swizzle
],
437 /* for indexing we want integers */
438 addr
= LLVMBuildFPToSI(bld
->base
.builder
, addr
,
440 addr
= LLVMBuildExtractElement(bld
->base
.builder
,
441 addr
, LLVMConstInt(LLVMInt32Type(), 0, 0),
443 addr
= lp_build_mul(&bld
->base
, addr
, LLVMConstInt(LLVMInt32Type(), 4, 0));
446 switch (reg
->Register
.File
) {
447 case TGSI_FILE_CONSTANT
:
449 LLVMValueRef index
= LLVMConstInt(LLVMInt32Type(),
450 reg
->Register
.Index
*4 + swizzle
, 0);
451 LLVMValueRef scalar
, scalar_ptr
;
453 if (reg
->Register
.Indirect
) {
454 /*lp_build_printf(bld->base.builder,
455 "\taddr = %d\n", addr);*/
456 index
= lp_build_add(&bld
->base
, index
, addr
);
458 scalar_ptr
= LLVMBuildGEP(bld
->base
.builder
, bld
->consts_ptr
,
460 scalar
= LLVMBuildLoad(bld
->base
.builder
, scalar_ptr
, "");
462 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
466 case TGSI_FILE_IMMEDIATE
:
467 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
471 case TGSI_FILE_INPUT
:
472 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
476 case TGSI_FILE_TEMPORARY
:
478 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
480 reg
->Register
.Indirect
,
482 res
= LLVMBuildLoad(bld
->base
.builder
, temp_ptr
, "");
484 return bld
->base
.undef
;
489 assert(0 && "invalid src register in emit_fetch()");
490 return bld
->base
.undef
;
493 switch( tgsi_util_get_full_src_register_sign_mode( reg
, chan_index
) ) {
494 case TGSI_UTIL_SIGN_CLEAR
:
495 res
= lp_build_abs( &bld
->base
, res
);
498 case TGSI_UTIL_SIGN_SET
:
499 /* TODO: Use bitwise OR for floating point */
500 res
= lp_build_abs( &bld
->base
, res
);
501 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
504 case TGSI_UTIL_SIGN_TOGGLE
:
505 res
= LLVMBuildNeg( bld
->base
.builder
, res
, "" );
508 case TGSI_UTIL_SIGN_KEEP
:
517 * Register fetch with derivatives.
/*
 * Fetch one source channel and optionally its x/y quad derivatives via
 * emit_ddx/emit_ddy. The ddx/ddy pointer guards and the plain-src out
 * parameter lines are missing from this extraction -- confirm upstream.
 */
521 struct lp_build_tgsi_soa_context
*bld
,
522 const struct tgsi_full_instruction
*inst
,
524 const unsigned chan_index
,
/* plain value of the channel */
531 src
= emit_fetch(bld
, inst
, index
, chan_index
);
536 /* TODO: use interpolation coeffs for inputs */
/* screen-space derivative approximations over the 2x2 quad */
539 *ddx
= emit_ddx(bld
, src
);
542 *ddy
= emit_ddy(bld
, src
);
550 emit_fetch_predicate(
551 struct lp_build_tgsi_soa_context
*bld
,
552 const struct tgsi_full_instruction
*inst
,
556 unsigned char swizzles
[4];
557 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
561 if (!inst
->Instruction
.Predicate
) {
562 FOR_EACH_CHANNEL( chan
) {
568 swizzles
[0] = inst
->Predicate
.SwizzleX
;
569 swizzles
[1] = inst
->Predicate
.SwizzleY
;
570 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
571 swizzles
[3] = inst
->Predicate
.SwizzleW
;
573 index
= inst
->Predicate
.Index
;
574 assert(index
< LP_MAX_TGSI_PREDS
);
576 FOR_EACH_CHANNEL( chan
) {
577 unsigned swizzle
= swizzles
[chan
];
580 * Only fetch the predicate register channels that are actually listed
583 if (!unswizzled
[swizzle
]) {
584 value
= LLVMBuildLoad(bld
->base
.builder
,
585 bld
->preds
[index
][swizzle
], "");
588 * Convert the value to an integer mask.
590 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
591 * is needlessly causing two comparisons due to storing the intermediate
592 * result as float vector instead of an integer mask vector.
594 value
= lp_build_compare(bld
->base
.builder
,
599 if (inst
->Predicate
.Negate
) {
600 value
= LLVMBuildNot(bld
->base
.builder
, value
, "");
603 unswizzled
[swizzle
] = value
;
605 value
= unswizzled
[swizzle
];
618 struct lp_build_tgsi_soa_context
*bld
,
619 const struct tgsi_full_instruction
*inst
,
625 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
626 LLVMValueRef addr
= NULL
;
628 switch( inst
->Instruction
.Saturate
) {
632 case TGSI_SAT_ZERO_ONE
:
633 value
= lp_build_max(&bld
->base
, value
, bld
->base
.zero
);
634 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
637 case TGSI_SAT_MINUS_PLUS_ONE
:
638 value
= lp_build_max(&bld
->base
, value
, lp_build_const_vec(bld
->base
.type
, -1.0));
639 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
646 if (reg
->Register
.Indirect
) {
647 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->base
.type
);
648 unsigned swizzle
= tgsi_util_get_src_register_swizzle( ®
->Indirect
, chan_index
);
649 addr
= LLVMBuildLoad(bld
->base
.builder
,
650 bld
->addr
[reg
->Indirect
.Index
][swizzle
],
652 /* for indexing we want integers */
653 addr
= LLVMBuildFPToSI(bld
->base
.builder
, addr
,
655 addr
= LLVMBuildExtractElement(bld
->base
.builder
,
656 addr
, LLVMConstInt(LLVMInt32Type(), 0, 0),
658 addr
= lp_build_mul(&bld
->base
, addr
, LLVMConstInt(LLVMInt32Type(), 4, 0));
661 switch( reg
->Register
.File
) {
662 case TGSI_FILE_OUTPUT
:
663 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
664 bld
->outputs
[reg
->Register
.Index
][chan_index
]);
667 case TGSI_FILE_TEMPORARY
: {
668 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
670 reg
->Register
.Indirect
,
672 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
, temp_ptr
);
676 case TGSI_FILE_ADDRESS
:
677 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
678 bld
->addr
[reg
->Indirect
.Index
][chan_index
]);
681 case TGSI_FILE_PREDICATE
:
682 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
683 bld
->preds
[index
][chan_index
]);
693 * High-level instruction translators.
697 TEX_MODIFIER_NONE
= 0,
698 TEX_MODIFIER_PROJECTED
,
699 TEX_MODIFIER_LOD_BIAS
,
700 TEX_MODIFIER_EXPLICIT_LOD
,
701 TEX_MODIFIER_EXPLICIT_DERIV
705 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
706 const struct tgsi_full_instruction
*inst
,
707 enum tex_modifier modifier
,
711 LLVMValueRef lod_bias
, explicit_lod
;
712 LLVMValueRef oow
= NULL
;
713 LLVMValueRef coords
[3];
720 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
721 for (i
= 0; i
< 4; i
++) {
722 texel
[i
] = bld
->base
.undef
;
727 switch (inst
->Texture
.Texture
) {
728 case TGSI_TEXTURE_1D
:
731 case TGSI_TEXTURE_2D
:
732 case TGSI_TEXTURE_RECT
:
735 case TGSI_TEXTURE_SHADOW1D
:
736 case TGSI_TEXTURE_SHADOW2D
:
737 case TGSI_TEXTURE_SHADOWRECT
:
738 case TGSI_TEXTURE_3D
:
739 case TGSI_TEXTURE_CUBE
:
747 if (modifier
== TEX_MODIFIER_LOD_BIAS
) {
748 lod_bias
= emit_fetch( bld
, inst
, 0, 3 );
751 else if (modifier
== TEX_MODIFIER_EXPLICIT_LOD
) {
753 explicit_lod
= emit_fetch( bld
, inst
, 0, 3 );
760 if (modifier
== TEX_MODIFIER_PROJECTED
) {
761 oow
= emit_fetch( bld
, inst
, 0, 3 );
762 oow
= lp_build_rcp(&bld
->base
, oow
);
765 for (i
= 0; i
< num_coords
; i
++) {
766 coords
[i
] = emit_fetch( bld
, inst
, 0, i
);
767 if (modifier
== TEX_MODIFIER_PROJECTED
)
768 coords
[i
] = lp_build_mul(&bld
->base
, coords
[i
], oow
);
770 for (i
= num_coords
; i
< 3; i
++) {
771 coords
[i
] = bld
->base
.undef
;
774 if (modifier
== TEX_MODIFIER_EXPLICIT_DERIV
) {
775 for (i
= 0; i
< num_coords
; i
++) {
776 ddx
[i
] = emit_fetch( bld
, inst
, 1, i
);
777 ddy
[i
] = emit_fetch( bld
, inst
, 2, i
);
779 unit
= inst
->Src
[3].Register
.Index
;
781 for (i
= 0; i
< num_coords
; i
++) {
782 ddx
[i
] = emit_ddx( bld
, coords
[i
] );
783 ddy
[i
] = emit_ddy( bld
, coords
[i
] );
785 unit
= inst
->Src
[1].Register
.Index
;
787 for (i
= num_coords
; i
< 3; i
++) {
788 ddx
[i
] = bld
->base
.undef
;
789 ddy
[i
] = bld
->base
.undef
;
792 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
795 unit
, num_coords
, coords
,
797 lod_bias
, explicit_lod
,
803 * Kill fragment if any of the src register values are negative.
807 struct lp_build_tgsi_soa_context
*bld
,
808 const struct tgsi_full_instruction
*inst
)
810 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
811 LLVMValueRef terms
[NUM_CHANNELS
];
815 memset(&terms
, 0, sizeof terms
);
817 FOR_EACH_CHANNEL( chan_index
) {
820 /* Unswizzle channel */
821 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
823 /* Check if the component has not been already tested. */
824 assert(swizzle
< NUM_CHANNELS
);
825 if( !terms
[swizzle
] )
826 /* TODO: change the comparison operator instead of setting the sign */
827 terms
[swizzle
] = emit_fetch(bld
, inst
, 0, chan_index
);
831 FOR_EACH_CHANNEL( chan_index
) {
832 if(terms
[chan_index
]) {
833 LLVMValueRef chan_mask
;
836 * If term < 0 then mask = 0 else mask = ~0.
838 chan_mask
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->base
.zero
);
841 mask
= LLVMBuildAnd(bld
->base
.builder
, mask
, chan_mask
, "");
848 lp_build_mask_update(bld
->mask
, mask
);
853 * Predicated fragment kill.
854 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
855 * The only predication is the execution mask which will apply if
856 * we're inside a loop or conditional.
859 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
860 const struct tgsi_full_instruction
*inst
)
864 /* For those channels which are "alive", disable fragment shader
867 if (bld
->exec_mask
.has_mask
) {
/* inside control flow: kill exactly the currently-executing channels */
868 mask
= LLVMBuildNot(bld
->base
.builder
, bld
->exec_mask
.exec_mask
, "kilp");
/* no control flow: kill everything (mask of zeros) */
871 mask
= bld
->base
.zero
;
/* fold into the fragment shader's live mask */
874 lp_build_mask_update(bld
->mask
, mask
);
879 struct lp_build_tgsi_soa_context
*bld
,
880 const struct tgsi_full_declaration
*decl
)
882 LLVMTypeRef vec_type
= lp_build_vec_type(bld
->base
.type
);
884 unsigned first
= decl
->Range
.First
;
885 unsigned last
= decl
->Range
.Last
;
888 for (idx
= first
; idx
<= last
; ++idx
) {
889 switch (decl
->Declaration
.File
) {
890 case TGSI_FILE_TEMPORARY
:
891 assert(idx
< LP_MAX_TGSI_TEMPS
);
892 if (bld
->has_indirect_addressing
) {
893 LLVMValueRef val
= LLVMConstInt(LLVMInt32Type(),
895 bld
->temps_array
= lp_build_array_alloca(bld
->base
.builder
,
898 for (i
= 0; i
< NUM_CHANNELS
; i
++)
899 bld
->temps
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
904 case TGSI_FILE_OUTPUT
:
905 for (i
= 0; i
< NUM_CHANNELS
; i
++)
906 bld
->outputs
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
910 case TGSI_FILE_ADDRESS
:
911 assert(idx
< LP_MAX_TGSI_ADDRS
);
912 for (i
= 0; i
< NUM_CHANNELS
; i
++)
913 bld
->addr
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
917 case TGSI_FILE_PREDICATE
:
918 assert(idx
< LP_MAX_TGSI_PREDS
);
919 for (i
= 0; i
< NUM_CHANNELS
; i
++)
920 bld
->preds
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
925 /* don't need to declare other vars */
933 * Emit LLVM for one TGSI instruction.
934 * \param return TRUE for success, FALSE otherwise
938 struct lp_build_tgsi_soa_context
*bld
,
939 const struct tgsi_full_instruction
*inst
,
940 const struct tgsi_opcode_info
*info
)
943 LLVMValueRef src0
, src1
, src2
;
944 LLVMValueRef tmp0
, tmp1
, tmp2
;
945 LLVMValueRef tmp3
= NULL
;
946 LLVMValueRef tmp4
= NULL
;
947 LLVMValueRef tmp5
= NULL
;
948 LLVMValueRef tmp6
= NULL
;
949 LLVMValueRef tmp7
= NULL
;
951 LLVMValueRef dst0
[NUM_CHANNELS
];
954 * Stores and write masks are handled in a general fashion after the long
955 * instruction opcode switch statement.
957 * Although not strictly necessary, we avoid generating instructions for
958 * channels which won't be stored, in cases where that's easy. For some
959 * complex instructions, like texture sampling, it is more convenient to
960 * assume a full writemask and then let LLVM optimization passes eliminate
964 assert(info
->num_dst
<= 1);
966 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
967 dst0
[chan_index
] = bld
->base
.undef
;
971 switch (inst
->Instruction
.Opcode
) {
972 case TGSI_OPCODE_ARL
:
973 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
974 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
975 tmp0
= lp_build_floor(&bld
->base
, tmp0
);
976 dst0
[chan_index
] = tmp0
;
980 case TGSI_OPCODE_MOV
:
981 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
982 dst0
[chan_index
] = emit_fetch( bld
, inst
, 0, chan_index
);
986 case TGSI_OPCODE_LIT
:
987 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ) {
988 dst0
[CHAN_X
] = bld
->base
.one
;
990 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
991 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
992 dst0
[CHAN_Y
] = lp_build_max( &bld
->base
, src0
, bld
->base
.zero
);
994 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
995 /* XMM[1] = SrcReg[0].yyyy */
996 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
997 /* XMM[1] = max(XMM[1], 0) */
998 tmp1
= lp_build_max( &bld
->base
, tmp1
, bld
->base
.zero
);
999 /* XMM[2] = SrcReg[0].wwww */
1000 tmp2
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1001 tmp1
= lp_build_pow( &bld
->base
, tmp1
, tmp2
);
1002 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1003 tmp2
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GREATER
, tmp0
, bld
->base
.zero
);
1004 dst0
[CHAN_Z
] = lp_build_select(&bld
->base
, tmp2
, tmp1
, bld
->base
.zero
);
1006 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) ) {
1007 dst0
[CHAN_W
] = bld
->base
.one
;
1011 case TGSI_OPCODE_RCP
:
1012 /* TGSI_OPCODE_RECIP */
1013 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1014 res
= lp_build_rcp(&bld
->base
, src0
);
1015 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1016 dst0
[chan_index
] = res
;
1020 case TGSI_OPCODE_RSQ
:
1021 /* TGSI_OPCODE_RECIPSQRT */
1022 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1023 src0
= lp_build_abs(&bld
->base
, src0
);
1024 res
= lp_build_rsqrt(&bld
->base
, src0
);
1025 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1026 dst0
[chan_index
] = res
;
1030 case TGSI_OPCODE_EXP
:
1031 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1032 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1033 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1034 LLVMValueRef
*p_exp2_int_part
= NULL
;
1035 LLVMValueRef
*p_frac_part
= NULL
;
1036 LLVMValueRef
*p_exp2
= NULL
;
1038 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1040 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1041 p_exp2_int_part
= &tmp0
;
1042 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1043 p_frac_part
= &tmp1
;
1044 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1047 lp_build_exp2_approx(&bld
->base
, src0
, p_exp2_int_part
, p_frac_part
, p_exp2
);
1049 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1050 dst0
[CHAN_X
] = tmp0
;
1051 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1052 dst0
[CHAN_Y
] = tmp1
;
1053 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1054 dst0
[CHAN_Z
] = tmp2
;
1057 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1058 dst0
[CHAN_W
] = bld
->base
.one
;
1062 case TGSI_OPCODE_LOG
:
1063 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1064 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1065 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1066 LLVMValueRef
*p_floor_log2
= NULL
;
1067 LLVMValueRef
*p_exp
= NULL
;
1068 LLVMValueRef
*p_log2
= NULL
;
1070 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1071 src0
= lp_build_abs( &bld
->base
, src0
);
1073 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1074 p_floor_log2
= &tmp0
;
1075 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1077 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1080 lp_build_log2_approx(&bld
->base
, src0
, p_exp
, p_floor_log2
, p_log2
);
1082 /* dst.x = floor(lg2(abs(src.x))) */
1083 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1084 dst0
[CHAN_X
] = tmp0
;
1085 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1086 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
)) {
1087 dst0
[CHAN_Y
] = lp_build_div( &bld
->base
, src0
, tmp1
);
1089 /* dst.z = lg2(abs(src.x)) */
1090 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1091 dst0
[CHAN_Z
] = tmp2
;
1094 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1095 dst0
[CHAN_W
] = bld
->base
.one
;
1099 case TGSI_OPCODE_MUL
:
1100 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1101 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1102 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1103 dst0
[chan_index
] = lp_build_mul(&bld
->base
, src0
, src1
);
1107 case TGSI_OPCODE_ADD
:
1108 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1109 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1110 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1111 dst0
[chan_index
] = lp_build_add(&bld
->base
, src0
, src1
);
1115 case TGSI_OPCODE_DP3
:
1116 /* TGSI_OPCODE_DOT3 */
1117 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1118 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1119 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1120 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1121 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1122 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1123 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1124 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1125 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1126 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1127 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1128 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1129 dst0
[chan_index
] = tmp0
;
1133 case TGSI_OPCODE_DP4
:
1134 /* TGSI_OPCODE_DOT4 */
1135 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1136 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1137 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1138 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1139 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1140 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1141 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1142 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1143 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1144 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1145 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1146 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1147 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1148 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1149 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1150 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1151 dst0
[chan_index
] = tmp0
;
1155 case TGSI_OPCODE_DST
:
1156 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1157 dst0
[CHAN_X
] = bld
->base
.one
;
1159 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1160 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1161 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1162 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1164 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1165 dst0
[CHAN_Z
] = emit_fetch( bld
, inst
, 0, CHAN_Z
);
1167 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1168 dst0
[CHAN_W
] = emit_fetch( bld
, inst
, 1, CHAN_W
);
1172 case TGSI_OPCODE_MIN
:
1173 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1174 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1175 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1176 dst0
[chan_index
] = lp_build_min( &bld
->base
, src0
, src1
);
1180 case TGSI_OPCODE_MAX
:
1181 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1182 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1183 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1184 dst0
[chan_index
] = lp_build_max( &bld
->base
, src0
, src1
);
1188 case TGSI_OPCODE_SLT
:
1189 /* TGSI_OPCODE_SETLT */
1190 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1191 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1192 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1193 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, src1
);
1194 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1198 case TGSI_OPCODE_SGE
:
1199 /* TGSI_OPCODE_SETGE */
1200 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1201 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1202 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1203 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GEQUAL
, src0
, src1
);
1204 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1208 case TGSI_OPCODE_MAD
:
1209 /* TGSI_OPCODE_MADD */
1210 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1211 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1212 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1213 tmp2
= emit_fetch( bld
, inst
, 2, chan_index
);
1214 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1215 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp2
);
1216 dst0
[chan_index
] = tmp0
;
1220 case TGSI_OPCODE_SUB
:
1221 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1222 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1223 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1224 dst0
[chan_index
] = lp_build_sub( &bld
->base
, tmp0
, tmp1
);
1228 case TGSI_OPCODE_LRP
:
1229 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1230 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1231 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1232 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1233 tmp0
= lp_build_sub( &bld
->base
, src1
, src2
);
1234 tmp0
= lp_build_mul( &bld
->base
, src0
, tmp0
);
1235 dst0
[chan_index
] = lp_build_add( &bld
->base
, tmp0
, src2
);
1239 case TGSI_OPCODE_CND
:
1240 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1241 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1242 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1243 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1244 tmp1
= lp_build_const_vec(bld
->base
.type
, 0.5);
1245 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src2
, tmp1
);
1246 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src0
, src1
);
1250 case TGSI_OPCODE_DP2A
:
1251 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1252 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1253 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1254 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1255 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1256 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1257 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1258 tmp1
= emit_fetch( bld
, inst
, 2, CHAN_X
); /* xmm1 = src[2].x */
1259 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1260 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1261 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1265 case TGSI_OPCODE_FRC
:
1266 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1267 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1268 tmp0
= lp_build_floor(&bld
->base
, src0
);
1269 tmp0
= lp_build_sub(&bld
->base
, src0
, tmp0
);
1270 dst0
[chan_index
] = tmp0
;
1274 case TGSI_OPCODE_CLAMP
:
1275 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1276 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1277 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1278 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1279 tmp0
= lp_build_max(&bld
->base
, tmp0
, src1
);
1280 tmp0
= lp_build_min(&bld
->base
, tmp0
, src2
);
1281 dst0
[chan_index
] = tmp0
;
1285 case TGSI_OPCODE_FLR
:
1286 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1287 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1288 dst0
[chan_index
] = lp_build_floor(&bld
->base
, tmp0
);
1292 case TGSI_OPCODE_ROUND
:
1293 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1294 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1295 dst0
[chan_index
] = lp_build_round(&bld
->base
, tmp0
);
1299 case TGSI_OPCODE_EX2
: {
1300 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1301 tmp0
= lp_build_exp2( &bld
->base
, tmp0
);
1302 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1303 dst0
[chan_index
] = tmp0
;
1308 case TGSI_OPCODE_LG2
:
1309 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1310 tmp0
= lp_build_log2( &bld
->base
, tmp0
);
1311 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1312 dst0
[chan_index
] = tmp0
;
1316 case TGSI_OPCODE_POW
:
1317 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1318 src1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1319 res
= lp_build_pow( &bld
->base
, src0
, src1
);
1320 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1321 dst0
[chan_index
] = res
;
1325 case TGSI_OPCODE_XPD
:
1326 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1327 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1328 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1329 tmp3
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1331 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1332 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1333 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1334 tmp4
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1336 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1338 tmp2
= lp_build_mul( &bld
->base
, tmp2
, tmp1
);
1340 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1341 tmp2
= lp_build_sub( &bld
->base
, tmp2
, tmp5
);
1342 dst0
[CHAN_X
] = tmp2
;
1344 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1345 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1346 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1347 tmp5
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1349 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1350 tmp3
= lp_build_mul( &bld
->base
, tmp3
, tmp2
);
1351 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp5
);
1352 tmp3
= lp_build_sub( &bld
->base
, tmp3
, tmp1
);
1353 dst0
[CHAN_Y
] = tmp3
;
1355 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1356 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1357 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp2
);
1358 tmp5
= lp_build_sub( &bld
->base
, tmp5
, tmp0
);
1359 dst0
[CHAN_Z
] = tmp5
;
1361 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1362 dst0
[CHAN_W
] = bld
->base
.one
;
1366 case TGSI_OPCODE_ABS
:
1367 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1368 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1369 dst0
[chan_index
] = lp_build_abs( &bld
->base
, tmp0
);
1373 case TGSI_OPCODE_RCC
:
1378 case TGSI_OPCODE_DPH
:
1379 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1380 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1381 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1382 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1383 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1384 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1385 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1386 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1387 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1388 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1389 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1390 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1391 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1392 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1393 dst0
[chan_index
] = tmp0
;
1397 case TGSI_OPCODE_COS
:
1398 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1399 tmp0
= lp_build_cos( &bld
->base
, tmp0
);
1400 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1401 dst0
[chan_index
] = tmp0
;
1405 case TGSI_OPCODE_DDX
:
1406 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1407 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, &dst0
[chan_index
], NULL
);
1411 case TGSI_OPCODE_DDY
:
1412 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1413 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, NULL
, &dst0
[chan_index
]);
1417 case TGSI_OPCODE_KILP
:
1418 /* predicated kill */
1419 emit_kilp( bld
, inst
);
1422 case TGSI_OPCODE_KIL
:
1423 /* conditional kill */
1424 emit_kil( bld
, inst
);
1427 case TGSI_OPCODE_PK2H
:
1431 case TGSI_OPCODE_PK2US
:
1435 case TGSI_OPCODE_PK4B
:
1439 case TGSI_OPCODE_PK4UB
:
1443 case TGSI_OPCODE_RFL
:
1447 case TGSI_OPCODE_SEQ
:
1448 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1449 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1450 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1451 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_EQUAL
, src0
, src1
);
1452 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1456 case TGSI_OPCODE_SFL
:
1457 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1458 dst0
[chan_index
] = bld
->base
.zero
;
1462 case TGSI_OPCODE_SGT
:
1463 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1464 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1465 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1466 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src0
, src1
);
1467 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1471 case TGSI_OPCODE_SIN
:
1472 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1473 tmp0
= lp_build_sin( &bld
->base
, tmp0
);
1474 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1475 dst0
[chan_index
] = tmp0
;
1479 case TGSI_OPCODE_SLE
:
1480 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1481 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1482 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1483 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LEQUAL
, src0
, src1
);
1484 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1488 case TGSI_OPCODE_SNE
:
1489 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1490 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1491 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1492 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_NOTEQUAL
, src0
, src1
);
1493 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1497 case TGSI_OPCODE_STR
:
1498 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1499 dst0
[chan_index
] = bld
->base
.one
;
1503 case TGSI_OPCODE_TEX
:
1504 emit_tex( bld
, inst
, TEX_MODIFIER_NONE
, dst0
);
1507 case TGSI_OPCODE_TXD
:
1508 emit_tex( bld
, inst
, TEX_MODIFIER_EXPLICIT_DERIV
, dst0
);
1511 case TGSI_OPCODE_UP2H
:
1517 case TGSI_OPCODE_UP2US
:
1523 case TGSI_OPCODE_UP4B
:
1529 case TGSI_OPCODE_UP4UB
:
1535 case TGSI_OPCODE_X2D
:
1541 case TGSI_OPCODE_ARA
:
1547 case TGSI_OPCODE_ARR
:
1548 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1549 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1550 tmp0
= lp_build_round(&bld
->base
, tmp0
);
1551 dst0
[chan_index
] = tmp0
;
1555 case TGSI_OPCODE_BRA
:
1561 case TGSI_OPCODE_CAL
:
1566 case TGSI_OPCODE_RET
:
1571 case TGSI_OPCODE_END
:
1574 case TGSI_OPCODE_SSG
:
1575 /* TGSI_OPCODE_SGN */
1576 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1577 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1578 dst0
[chan_index
] = lp_build_sgn( &bld
->base
, tmp0
);
1582 case TGSI_OPCODE_CMP
:
1583 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1584 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1585 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1586 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1587 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, bld
->base
.zero
);
1588 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src1
, src2
);
1592 case TGSI_OPCODE_SCS
:
1593 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1594 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1595 dst0
[CHAN_X
] = lp_build_cos( &bld
->base
, tmp0
);
1597 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1598 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1599 dst0
[CHAN_Y
] = lp_build_sin( &bld
->base
, tmp0
);
1601 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1602 dst0
[CHAN_Z
] = bld
->base
.zero
;
1604 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1605 dst0
[CHAN_W
] = bld
->base
.one
;
1609 case TGSI_OPCODE_TXB
:
1610 emit_tex( bld
, inst
, TEX_MODIFIER_LOD_BIAS
, dst0
);
1613 case TGSI_OPCODE_NRM
:
1615 case TGSI_OPCODE_NRM4
:
1616 /* 3 or 4-component normalization */
1618 uint dims
= (inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
1620 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) ||
1621 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
) ||
1622 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
) ||
1623 (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 4)) {
1625 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1628 /* xmm0 = src.x * src.x */
1629 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1630 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1633 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp0
);
1636 /* xmm0 = xmm0 + src.y * src.y */
1637 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Y
);
1638 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1641 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1642 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1645 /* xmm0 = xmm0 + src.z * src.z */
1646 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Z
);
1647 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1650 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1651 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1655 /* xmm0 = xmm0 + src.w * src.w */
1656 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_W
);
1657 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
)) {
1660 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1661 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1664 /* xmm1 = 1 / sqrt(xmm0) */
1665 tmp1
= lp_build_rsqrt( &bld
->base
, tmp0
);
1667 /* dst.x = xmm1 * src.x */
1668 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1669 dst0
[CHAN_X
] = lp_build_mul( &bld
->base
, tmp4
, tmp1
);
1672 /* dst.y = xmm1 * src.y */
1673 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1674 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp5
, tmp1
);
1677 /* dst.z = xmm1 * src.z */
1678 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1679 dst0
[CHAN_Z
] = lp_build_mul( &bld
->base
, tmp6
, tmp1
);
1682 /* dst.w = xmm1 * src.w */
1683 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) && dims
== 4) {
1684 dst0
[CHAN_W
] = lp_build_mul( &bld
->base
, tmp7
, tmp1
);
1689 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 3) {
1690 dst0
[CHAN_W
] = bld
->base
.one
;
1695 case TGSI_OPCODE_DIV
:
1701 case TGSI_OPCODE_DP2
:
1702 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1703 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1704 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1705 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1706 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1707 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1708 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1709 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1710 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1714 case TGSI_OPCODE_TXL
:
1715 emit_tex( bld
, inst
, TEX_MODIFIER_EXPLICIT_LOD
, dst0
);
1718 case TGSI_OPCODE_TXP
:
1719 emit_tex( bld
, inst
, TEX_MODIFIER_PROJECTED
, dst0
);
1722 case TGSI_OPCODE_BRK
:
1723 lp_exec_break(&bld
->exec_mask
);
1726 case TGSI_OPCODE_IF
:
1727 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1728 tmp0
= lp_build_cmp(&bld
->base
, PIPE_FUNC_NOTEQUAL
,
1729 tmp0
, bld
->base
.zero
);
1730 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp0
);
1733 case TGSI_OPCODE_BGNLOOP
:
1734 lp_exec_bgnloop(&bld
->exec_mask
);
1737 case TGSI_OPCODE_ELSE
:
1738 lp_exec_mask_cond_invert(&bld
->exec_mask
);
1741 case TGSI_OPCODE_ENDIF
:
1742 lp_exec_mask_cond_pop(&bld
->exec_mask
);
1745 case TGSI_OPCODE_ENDLOOP
:
1746 lp_exec_endloop(&bld
->exec_mask
);
1749 case TGSI_OPCODE_PUSHA
:
1755 case TGSI_OPCODE_POPA
:
1761 case TGSI_OPCODE_CEIL
:
1762 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1763 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1764 dst0
[chan_index
] = lp_build_ceil(&bld
->base
, tmp0
);
1768 case TGSI_OPCODE_I2F
:
1774 case TGSI_OPCODE_NOT
:
1780 case TGSI_OPCODE_TRUNC
:
1781 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1782 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1783 dst0
[chan_index
] = lp_build_trunc(&bld
->base
, tmp0
);
1787 case TGSI_OPCODE_SHL
:
1793 case TGSI_OPCODE_ISHR
:
1799 case TGSI_OPCODE_AND
:
1805 case TGSI_OPCODE_OR
:
1811 case TGSI_OPCODE_MOD
:
1817 case TGSI_OPCODE_XOR
:
1823 case TGSI_OPCODE_SAD
:
1829 case TGSI_OPCODE_TXF
:
1835 case TGSI_OPCODE_TXQ
:
1841 case TGSI_OPCODE_CONT
:
1842 lp_exec_continue(&bld
->exec_mask
);
1845 case TGSI_OPCODE_EMIT
:
1849 case TGSI_OPCODE_ENDPRIM
:
1853 case TGSI_OPCODE_NOP
:
1861 LLVMValueRef pred
[NUM_CHANNELS
];
1863 emit_fetch_predicate( bld
, inst
, pred
);
1865 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1866 emit_store( bld
, inst
, 0, chan_index
, pred
[chan_index
], dst0
[chan_index
]);
1875 lp_build_tgsi_soa(LLVMBuilderRef builder
,
1876 const struct tgsi_token
*tokens
,
1877 struct lp_type type
,
1878 struct lp_build_mask_context
*mask
,
1879 LLVMValueRef consts_ptr
,
1880 const LLVMValueRef
*pos
,
1881 const LLVMValueRef (*inputs
)[NUM_CHANNELS
],
1882 LLVMValueRef (*outputs
)[NUM_CHANNELS
],
1883 struct lp_build_sampler_soa
*sampler
,
1884 const struct tgsi_shader_info
*info
)
1886 struct lp_build_tgsi_soa_context bld
;
1887 struct tgsi_parse_context parse
;
1888 uint num_immediates
= 0;
1891 /* Setup build context */
1892 memset(&bld
, 0, sizeof bld
);
1893 lp_build_context_init(&bld
.base
, builder
, type
);
1894 lp_build_context_init(&bld
.int_bld
, builder
, lp_int_type(type
));
1897 bld
.inputs
= inputs
;
1898 bld
.outputs
= outputs
;
1899 bld
.consts_ptr
= consts_ptr
;
1900 bld
.sampler
= sampler
;
1901 bld
.has_indirect_addressing
= info
->opcode_count
[TGSI_OPCODE_ARR
] > 0 ||
1902 info
->opcode_count
[TGSI_OPCODE_ARL
] > 0;
1904 lp_exec_mask_init(&bld
.exec_mask
, &bld
.base
);
1906 tgsi_parse_init( &parse
, tokens
);
1908 while( !tgsi_parse_end_of_tokens( &parse
) ) {
1909 tgsi_parse_token( &parse
);
1911 switch( parse
.FullToken
.Token
.Type
) {
1912 case TGSI_TOKEN_TYPE_DECLARATION
:
1913 /* Inputs already interpolated */
1914 emit_declaration( &bld
, &parse
.FullToken
.FullDeclaration
);
1917 case TGSI_TOKEN_TYPE_INSTRUCTION
:
1919 unsigned opcode
= parse
.FullToken
.FullInstruction
.Instruction
.Opcode
;
1920 const struct tgsi_opcode_info
*opcode_info
= tgsi_get_opcode_info(opcode
);
1921 if (!emit_instruction( &bld
, &parse
.FullToken
.FullInstruction
, opcode_info
))
1922 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
1923 opcode_info
->mnemonic
);
1928 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1929 /* simply copy the immediate values into the next immediates[] slot */
1931 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
1933 assert(num_immediates
< LP_MAX_TGSI_IMMEDIATES
);
1934 for( i
= 0; i
< size
; ++i
)
1935 bld
.immediates
[num_immediates
][i
] =
1936 lp_build_const_vec(type
, parse
.FullToken
.FullImmediate
.u
[i
].Float
);
1937 for( i
= size
; i
< 4; ++i
)
1938 bld
.immediates
[num_immediates
][i
] = bld
.base
.undef
;
1943 case TGSI_TOKEN_TYPE_PROPERTY
:
1951 LLVMBasicBlockRef block
= LLVMGetInsertBlock(builder
);
1952 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
1953 debug_printf("11111111111111111111111111111 \n");
1954 tgsi_dump(tokens
, 0);
1955 lp_debug_dump_value(function
);
1956 debug_printf("2222222222222222222222222222 \n");
1958 tgsi_parse_free( &parse
);