1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_scan.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_bitarit.h"
53 #include "lp_bld_gather.h"
54 #include "lp_bld_logic.h"
55 #include "lp_bld_swizzle.h"
56 #include "lp_bld_flow.h"
57 #include "lp_bld_quad.h"
58 #include "lp_bld_tgsi.h"
59 #include "lp_bld_limits.h"
60 #include "lp_bld_debug.h"
61 #include "lp_bld_printf.h"
/* Channel iteration helpers over a 4-component TGSI register. */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* TRUE if channel CHAN is enabled in the writemask of instruction dst 0. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* Symbolic channel indices.
 * NOTE(review): these four defines were lost in extraction but are used
 * by emit_instruction() below (CHAN_X .. CHAN_W) -- restored. */
#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3

#define NUM_CHANNELS 4

#define LP_MAX_INSTRUCTIONS 256
87 struct lp_build_context
*bld
;
91 LLVMTypeRef int_vec_type
;
93 LLVMValueRef cond_stack
[LP_MAX_TGSI_NESTING
];
95 LLVMValueRef cond_mask
;
97 LLVMBasicBlockRef loop_block
;
98 LLVMValueRef cont_mask
;
99 LLVMValueRef break_mask
;
100 LLVMValueRef break_var
;
102 LLVMBasicBlockRef loop_block
;
103 LLVMValueRef cont_mask
;
104 LLVMValueRef break_mask
;
105 LLVMValueRef break_var
;
106 } loop_stack
[LP_MAX_TGSI_NESTING
];
109 LLVMValueRef ret_mask
;
112 LLVMValueRef ret_mask
;
113 } call_stack
[LP_MAX_TGSI_NESTING
];
116 LLVMValueRef exec_mask
;
119 struct lp_build_tgsi_soa_context
121 struct lp_build_context base
;
123 /* Builder for integer masks and indices */
124 struct lp_build_context uint_bld
;
126 LLVMValueRef consts_ptr
;
127 const LLVMValueRef
*pos
;
128 const LLVMValueRef (*inputs
)[NUM_CHANNELS
];
129 LLVMValueRef (*outputs
)[NUM_CHANNELS
];
131 const struct lp_build_sampler_soa
*sampler
;
133 LLVMValueRef immediates
[LP_MAX_TGSI_IMMEDIATES
][NUM_CHANNELS
];
134 LLVMValueRef temps
[LP_MAX_TGSI_TEMPS
][NUM_CHANNELS
];
135 LLVMValueRef addr
[LP_MAX_TGSI_ADDRS
][NUM_CHANNELS
];
136 LLVMValueRef preds
[LP_MAX_TGSI_PREDS
][NUM_CHANNELS
];
138 /* We allocate/use this array of temps if (1 << TGSI_FILE_TEMPORARY) is
139 * set in the indirect_files field.
140 * The temps[] array above is unused then.
142 LLVMValueRef temps_array
;
144 const struct tgsi_shader_info
*info
;
145 /** bitmask indicating which register files are accessed indirectly */
146 unsigned indirect_files
;
148 struct lp_build_mask_context
*mask
;
149 struct lp_exec_mask exec_mask
;
151 struct tgsi_full_instruction
*instructions
;
152 uint max_instructions
;
155 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
158 mask
->has_mask
= FALSE
;
159 mask
->cond_stack_size
= 0;
160 mask
->loop_stack_size
= 0;
161 mask
->call_stack_size
= 0;
163 mask
->int_vec_type
= lp_build_int_vec_type(mask
->bld
->type
);
164 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
= mask
->cond_mask
=
165 LLVMConstAllOnes(mask
->int_vec_type
);
168 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
170 if (mask
->loop_stack_size
) {
171 /*for loops we need to update the entire mask at runtime */
173 assert(mask
->break_mask
);
174 tmp
= LLVMBuildAnd(mask
->bld
->builder
,
178 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
183 mask
->exec_mask
= mask
->cond_mask
;
185 if (mask
->call_stack_size
) {
186 mask
->exec_mask
= LLVMBuildAnd(mask
->bld
->builder
,
192 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
193 mask
->loop_stack_size
> 0 ||
194 mask
->call_stack_size
> 0);
197 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
200 assert(mask
->cond_stack_size
< LP_MAX_TGSI_NESTING
);
201 if (mask
->cond_stack_size
== 0) {
202 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
204 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
205 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
206 mask
->cond_mask
= LLVMBuildAnd(mask
->bld
->builder
,
210 lp_exec_mask_update(mask
);
213 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
215 LLVMValueRef prev_mask
;
216 LLVMValueRef inv_mask
;
218 assert(mask
->cond_stack_size
);
219 prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
220 if (mask
->cond_stack_size
== 1) {
221 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
224 inv_mask
= LLVMBuildNot(mask
->bld
->builder
, mask
->cond_mask
, "");
226 mask
->cond_mask
= LLVMBuildAnd(mask
->bld
->builder
,
229 lp_exec_mask_update(mask
);
232 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
234 assert(mask
->cond_stack_size
);
235 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
236 lp_exec_mask_update(mask
);
239 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
241 if (mask
->loop_stack_size
== 0) {
242 assert(mask
->loop_block
== NULL
);
243 assert(mask
->cont_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
244 assert(mask
->break_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
245 assert(mask
->break_var
== NULL
);
248 assert(mask
->loop_stack_size
< LP_MAX_TGSI_NESTING
);
250 mask
->loop_stack
[mask
->loop_stack_size
].loop_block
= mask
->loop_block
;
251 mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
252 mask
->loop_stack
[mask
->loop_stack_size
].break_mask
= mask
->break_mask
;
253 mask
->loop_stack
[mask
->loop_stack_size
].break_var
= mask
->break_var
;
254 ++mask
->loop_stack_size
;
256 mask
->break_var
= lp_build_alloca(mask
->bld
->builder
, mask
->int_vec_type
, "");
257 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
259 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->builder
, "bgnloop");
260 LLVMBuildBr(mask
->bld
->builder
, mask
->loop_block
);
261 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, mask
->loop_block
);
263 mask
->break_mask
= LLVMBuildLoad(mask
->bld
->builder
, mask
->break_var
, "");
265 lp_exec_mask_update(mask
);
268 static void lp_exec_break(struct lp_exec_mask
*mask
)
270 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
274 mask
->break_mask
= LLVMBuildAnd(mask
->bld
->builder
,
276 exec_mask
, "break_full");
278 lp_exec_mask_update(mask
);
281 static void lp_exec_continue(struct lp_exec_mask
*mask
)
283 LLVMValueRef exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
287 mask
->cont_mask
= LLVMBuildAnd(mask
->bld
->builder
,
291 lp_exec_mask_update(mask
);
295 static void lp_exec_endloop(struct lp_exec_mask
*mask
)
297 LLVMBasicBlockRef endloop
;
298 LLVMTypeRef reg_type
= LLVMIntType(mask
->bld
->type
.width
*
299 mask
->bld
->type
.length
);
302 assert(mask
->break_mask
);
305 * Restore the cont_mask, but don't pop
307 assert(mask
->loop_stack_size
);
308 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
- 1].cont_mask
;
309 lp_exec_mask_update(mask
);
312 * Unlike the continue mask, the break_mask must be preserved across loop
315 LLVMBuildStore(mask
->bld
->builder
, mask
->break_mask
, mask
->break_var
);
317 /* i1cond = (mask == 0) */
318 i1cond
= LLVMBuildICmp(
321 LLVMBuildBitCast(mask
->bld
->builder
, mask
->exec_mask
, reg_type
, ""),
322 LLVMConstNull(reg_type
), "");
324 endloop
= lp_build_insert_new_block(mask
->bld
->builder
, "endloop");
326 LLVMBuildCondBr(mask
->bld
->builder
,
327 i1cond
, mask
->loop_block
, endloop
);
329 LLVMPositionBuilderAtEnd(mask
->bld
->builder
, endloop
);
331 assert(mask
->loop_stack_size
);
332 --mask
->loop_stack_size
;
333 mask
->loop_block
= mask
->loop_stack
[mask
->loop_stack_size
].loop_block
;
334 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
;
335 mask
->break_mask
= mask
->loop_stack
[mask
->loop_stack_size
].break_mask
;
336 mask
->break_var
= mask
->loop_stack
[mask
->loop_stack_size
].break_var
;
338 lp_exec_mask_update(mask
);
341 /* stores val into an address pointed to by dst.
342 * mask->exec_mask is used to figure out which bits of val
343 * should be stored into the address
344 * (0 means don't store this bit, 1 means do store).
346 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
351 /* Mix the predicate and execution mask */
352 if (mask
->has_mask
) {
354 pred
= LLVMBuildAnd(mask
->bld
->builder
, pred
, mask
->exec_mask
, "");
356 pred
= mask
->exec_mask
;
361 LLVMValueRef real_val
, dst_val
;
363 dst_val
= LLVMBuildLoad(mask
->bld
->builder
, dst
, "");
364 real_val
= lp_build_select(mask
->bld
,
368 LLVMBuildStore(mask
->bld
->builder
, real_val
, dst
);
370 LLVMBuildStore(mask
->bld
->builder
, val
, dst
);
373 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
377 assert(mask
->call_stack_size
< LP_MAX_TGSI_NESTING
);
378 mask
->call_stack
[mask
->call_stack_size
].pc
= *pc
;
379 mask
->call_stack
[mask
->call_stack_size
].ret_mask
= mask
->ret_mask
;
380 mask
->call_stack_size
++;
384 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
386 LLVMValueRef exec_mask
;
388 if (mask
->call_stack_size
== 0) {
389 /* returning from main() */
393 exec_mask
= LLVMBuildNot(mask
->bld
->builder
,
397 mask
->ret_mask
= LLVMBuildAnd(mask
->bld
->builder
,
399 exec_mask
, "ret_full");
401 lp_exec_mask_update(mask
);
/**
 * BGNSUB: no mask work needed at subroutine entry; all bookkeeping happens
 * in lp_exec_mask_call()/lp_exec_mask_endsub().
 * NOTE(review): body not visible in the extraction; an empty body is
 * consistent with the call/endsub pairing -- verify upstream.
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
408 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
410 assert(mask
->call_stack_size
);
411 mask
->call_stack_size
--;
412 *pc
= mask
->call_stack
[mask
->call_stack_size
].pc
;
413 mask
->ret_mask
= mask
->call_stack
[mask
->call_stack_size
].ret_mask
;
414 lp_exec_mask_update(mask
);
419 * Return pointer to a temporary register channel (src or dest).
420 * Note that indirect addressing cannot be handled here.
421 * \param index which temporary register
422 * \param chan which channel of the temp register.
425 get_temp_ptr(struct lp_build_tgsi_soa_context
*bld
,
430 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
431 LLVMValueRef lindex
= lp_build_const_int32(index
* 4 + chan
);
432 return LLVMBuildGEP(bld
->base
.builder
, bld
->temps_array
, &lindex
, 1, "");
435 return bld
->temps
[index
][chan
];
442 * XXX the lp_build_gather() function should be capable of doing this
443 * with a little work.
446 build_gather(struct lp_build_tgsi_soa_context
*bld
,
447 LLVMValueRef base_ptr
,
448 LLVMValueRef indexes
)
450 LLVMValueRef res
= bld
->base
.undef
;
454 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
456 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
457 LLVMValueRef ii
= LLVMConstInt(LLVMInt32Type(), i
, 0);
458 LLVMValueRef index
= LLVMBuildExtractElement(bld
->base
.builder
,
460 LLVMValueRef scalar_ptr
= LLVMBuildGEP(bld
->base
.builder
, base_ptr
,
461 &index
, 1, "gather_ptr");
462 LLVMValueRef scalar
= LLVMBuildLoad(bld
->base
.builder
, scalar_ptr
, "");
464 res
= LLVMBuildInsertElement(bld
->base
.builder
, res
, scalar
, ii
, "");
472 * Scatter/store vector.
475 build_scatter(struct lp_build_tgsi_soa_context
*bld
,
476 LLVMValueRef base_ptr
,
477 LLVMValueRef indexes
,
480 LLVMBuilderRef builder
= bld
->base
.builder
;
484 * Loop over elements of index_vec, store scalar value.
486 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
487 LLVMValueRef ii
= LLVMConstInt(LLVMInt32Type(), i
, 0);
488 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
489 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
490 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
493 lp_build_printf(builder
, "scatter %d: val %f at %d %p\n",
494 ii
, val
, index
, scalar_ptr
);
496 LLVMBuildStore(builder
, val
, scalar_ptr
);
502 * Read the current value of the ADDR register, convert the floats to
503 * ints, add the base index and return the vector of offsets.
504 * The offsets will be used to index into the constant buffer or
505 * temporary register file.
508 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
509 unsigned reg_file
, unsigned reg_index
,
510 const struct tgsi_src_register
*indirect_reg
)
512 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
513 /* always use X component of address register */
514 unsigned swizzle
= indirect_reg
->SwizzleX
;
517 LLVMValueRef max_index
;
520 assert(bld
->indirect_files
& (1 << reg_file
));
522 base
= lp_build_const_int_vec(uint_bld
->type
, reg_index
);
525 rel
= LLVMBuildLoad(bld
->base
.builder
,
526 bld
->addr
[indirect_reg
->Index
][swizzle
],
529 /* for indexing we want integers */
530 rel
= LLVMBuildFPToSI(bld
->base
.builder
,
532 uint_bld
->vec_type
, "");
534 index
= lp_build_add(uint_bld
, base
, rel
);
536 max_index
= lp_build_const_int_vec(uint_bld
->type
,
537 bld
->info
->file_max
[reg_file
]);
539 assert(!uint_bld
->type
.sign
);
540 index
= lp_build_min(uint_bld
, index
, max_index
);
551 struct lp_build_tgsi_soa_context
*bld
,
552 const struct tgsi_full_instruction
*inst
,
554 const unsigned chan_index
)
556 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
557 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
558 const unsigned swizzle
=
559 tgsi_util_get_full_src_register_swizzle(reg
, chan_index
);
561 LLVMValueRef indirect_index
= NULL
;
564 assert(0 && "invalid swizzle in emit_fetch()");
565 return bld
->base
.undef
;
568 if (reg
->Register
.Indirect
) {
569 indirect_index
= get_indirect_index(bld
,
574 assert(reg
->Register
.Index
<= bld
->info
->file_max
[reg
->Register
.File
]);
577 switch (reg
->Register
.File
) {
578 case TGSI_FILE_CONSTANT
:
579 if (reg
->Register
.Indirect
) {
580 LLVMValueRef swizzle_vec
=
581 lp_build_const_int_vec(uint_bld
->type
, swizzle
);
582 LLVMValueRef index_vec
; /* index into the const buffer */
584 /* index_vec = indirect_index * 4 + swizzle */
585 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
586 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
588 /* Gather values from the constant buffer */
589 res
= build_gather(bld
, bld
->consts_ptr
, index_vec
);
592 LLVMValueRef index
; /* index into the const buffer */
593 LLVMValueRef scalar
, scalar_ptr
;
595 index
= lp_build_const_int32(reg
->Register
.Index
*4 + swizzle
);
597 scalar_ptr
= LLVMBuildGEP(bld
->base
.builder
, bld
->consts_ptr
,
599 scalar
= LLVMBuildLoad(bld
->base
.builder
, scalar_ptr
, "");
601 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
605 case TGSI_FILE_IMMEDIATE
:
606 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
610 case TGSI_FILE_INPUT
:
611 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
615 case TGSI_FILE_TEMPORARY
:
616 if (reg
->Register
.Indirect
) {
617 LLVMValueRef swizzle_vec
=
618 lp_build_const_int_vec(uint_bld
->type
, swizzle
);
619 LLVMValueRef length_vec
=
620 lp_build_const_int_vec(uint_bld
->type
, bld
->base
.type
.length
);
621 LLVMValueRef index_vec
; /* index into the const buffer */
622 LLVMValueRef temps_array
;
623 LLVMTypeRef float4_ptr_type
;
625 /* index_vec = (indirect_index * 4 + swizzle) * length */
626 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
627 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
628 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
630 /* cast temps_array pointer to float* */
631 float4_ptr_type
= LLVMPointerType(LLVMFloatType(), 0);
632 temps_array
= LLVMBuildBitCast(uint_bld
->builder
, bld
->temps_array
,
633 float4_ptr_type
, "");
635 /* Gather values from the temporary register array */
636 res
= build_gather(bld
, temps_array
, index_vec
);
639 LLVMValueRef temp_ptr
;
640 temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
, swizzle
);
641 res
= LLVMBuildLoad(bld
->base
.builder
, temp_ptr
, "");
643 return bld
->base
.undef
;
648 assert(0 && "invalid src register in emit_fetch()");
649 return bld
->base
.undef
;
652 switch( tgsi_util_get_full_src_register_sign_mode( reg
, chan_index
) ) {
653 case TGSI_UTIL_SIGN_CLEAR
:
654 res
= lp_build_abs( &bld
->base
, res
);
657 case TGSI_UTIL_SIGN_SET
:
658 res
= lp_build_abs( &bld
->base
, res
);
660 case TGSI_UTIL_SIGN_TOGGLE
:
661 res
= lp_build_negate( &bld
->base
, res
);
664 case TGSI_UTIL_SIGN_KEEP
:
673 * Register fetch with derivatives.
677 struct lp_build_tgsi_soa_context
*bld
,
678 const struct tgsi_full_instruction
*inst
,
680 const unsigned chan_index
,
687 src
= emit_fetch(bld
, inst
, index
, chan_index
);
692 /* TODO: use interpolation coeffs for inputs */
695 *ddx
= lp_build_ddx(&bld
->base
, src
);
698 *ddy
= lp_build_ddy(&bld
->base
, src
);
706 emit_fetch_predicate(
707 struct lp_build_tgsi_soa_context
*bld
,
708 const struct tgsi_full_instruction
*inst
,
712 unsigned char swizzles
[4];
713 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
717 if (!inst
->Instruction
.Predicate
) {
718 FOR_EACH_CHANNEL( chan
) {
724 swizzles
[0] = inst
->Predicate
.SwizzleX
;
725 swizzles
[1] = inst
->Predicate
.SwizzleY
;
726 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
727 swizzles
[3] = inst
->Predicate
.SwizzleW
;
729 index
= inst
->Predicate
.Index
;
730 assert(index
< LP_MAX_TGSI_PREDS
);
732 FOR_EACH_CHANNEL( chan
) {
733 unsigned swizzle
= swizzles
[chan
];
736 * Only fetch the predicate register channels that are actually listed
739 if (!unswizzled
[swizzle
]) {
740 value
= LLVMBuildLoad(bld
->base
.builder
,
741 bld
->preds
[index
][swizzle
], "");
744 * Convert the value to an integer mask.
746 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
747 * is needlessly causing two comparisons due to storing the intermediate
748 * result as float vector instead of an integer mask vector.
750 value
= lp_build_compare(bld
->base
.builder
,
755 if (inst
->Predicate
.Negate
) {
756 value
= LLVMBuildNot(bld
->base
.builder
, value
, "");
759 unswizzled
[swizzle
] = value
;
761 value
= unswizzled
[swizzle
];
774 struct lp_build_tgsi_soa_context
*bld
,
775 const struct tgsi_full_instruction
*inst
,
781 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
782 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
783 LLVMValueRef indirect_index
= NULL
;
785 switch( inst
->Instruction
.Saturate
) {
789 case TGSI_SAT_ZERO_ONE
:
790 value
= lp_build_max(&bld
->base
, value
, bld
->base
.zero
);
791 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
794 case TGSI_SAT_MINUS_PLUS_ONE
:
795 value
= lp_build_max(&bld
->base
, value
, lp_build_const_vec(bld
->base
.type
, -1.0));
796 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
803 if (reg
->Register
.Indirect
) {
804 indirect_index
= get_indirect_index(bld
,
809 assert(reg
->Register
.Index
<= bld
->info
->file_max
[reg
->Register
.File
]);
812 switch( reg
->Register
.File
) {
813 case TGSI_FILE_OUTPUT
:
814 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
815 bld
->outputs
[reg
->Register
.Index
][chan_index
]);
818 case TGSI_FILE_TEMPORARY
:
819 if (reg
->Register
.Indirect
) {
820 LLVMBuilderRef builder
= bld
->base
.builder
;
821 LLVMValueRef chan_vec
=
822 lp_build_const_int_vec(uint_bld
->type
, chan_index
);
823 LLVMValueRef length_vec
=
824 lp_build_const_int_vec(uint_bld
->type
, bld
->base
.type
.length
);
825 LLVMValueRef index_vec
; /* indexes into the temp registers */
826 LLVMValueRef temps_array
;
827 LLVMValueRef pixel_offsets
;
828 LLVMTypeRef float_ptr_type
;
831 /* build pixel offset vector: {0, 1, 2, 3, ...} */
832 pixel_offsets
= uint_bld
->undef
;
833 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
834 LLVMValueRef ii
= lp_build_const_int32(i
);
835 pixel_offsets
= LLVMBuildInsertElement(builder
, pixel_offsets
,
839 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
840 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
841 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
842 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
843 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
845 float_ptr_type
= LLVMPointerType(LLVMFloatType(), 0);
846 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
,
849 /* Scatter store values into temp registers */
850 build_scatter(bld
, temps_array
, index_vec
, value
);
853 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
855 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
, temp_ptr
);
859 case TGSI_FILE_ADDRESS
:
860 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
861 bld
->addr
[reg
->Indirect
.Index
][chan_index
]);
864 case TGSI_FILE_PREDICATE
:
865 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
866 bld
->preds
[reg
->Register
.Index
][chan_index
]);
876 * High-level instruction translators.
880 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
881 const struct tgsi_full_instruction
*inst
,
882 enum lp_build_tex_modifier modifier
,
886 LLVMValueRef lod_bias
, explicit_lod
;
887 LLVMValueRef oow
= NULL
;
888 LLVMValueRef coords
[3];
895 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
896 for (i
= 0; i
< 4; i
++) {
897 texel
[i
] = bld
->base
.undef
;
902 switch (inst
->Texture
.Texture
) {
903 case TGSI_TEXTURE_1D
:
906 case TGSI_TEXTURE_2D
:
907 case TGSI_TEXTURE_RECT
:
910 case TGSI_TEXTURE_SHADOW1D
:
911 case TGSI_TEXTURE_SHADOW2D
:
912 case TGSI_TEXTURE_SHADOWRECT
:
913 case TGSI_TEXTURE_3D
:
914 case TGSI_TEXTURE_CUBE
:
922 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
923 lod_bias
= emit_fetch( bld
, inst
, 0, 3 );
926 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
928 explicit_lod
= emit_fetch( bld
, inst
, 0, 3 );
935 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
936 oow
= emit_fetch( bld
, inst
, 0, 3 );
937 oow
= lp_build_rcp(&bld
->base
, oow
);
940 for (i
= 0; i
< num_coords
; i
++) {
941 coords
[i
] = emit_fetch( bld
, inst
, 0, i
);
942 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
943 coords
[i
] = lp_build_mul(&bld
->base
, coords
[i
], oow
);
945 for (i
= num_coords
; i
< 3; i
++) {
946 coords
[i
] = bld
->base
.undef
;
949 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
950 LLVMTypeRef i32t
= LLVMInt32Type();
951 LLVMValueRef index0
= LLVMConstInt(i32t
, 0, 0);
952 for (i
= 0; i
< num_coords
; i
++) {
953 LLVMValueRef src1
= emit_fetch( bld
, inst
, 1, i
);
954 LLVMValueRef src2
= emit_fetch( bld
, inst
, 2, i
);
955 ddx
[i
] = LLVMBuildExtractElement(bld
->base
.builder
, src1
, index0
, "");
956 ddy
[i
] = LLVMBuildExtractElement(bld
->base
.builder
, src2
, index0
, "");
958 unit
= inst
->Src
[3].Register
.Index
;
960 for (i
= 0; i
< num_coords
; i
++) {
961 ddx
[i
] = lp_build_scalar_ddx( &bld
->base
, coords
[i
] );
962 ddy
[i
] = lp_build_scalar_ddy( &bld
->base
, coords
[i
] );
964 unit
= inst
->Src
[1].Register
.Index
;
966 for (i
= num_coords
; i
< 3; i
++) {
967 ddx
[i
] = LLVMGetUndef(bld
->base
.elem_type
);
968 ddy
[i
] = LLVMGetUndef(bld
->base
.elem_type
);
971 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
974 unit
, num_coords
, coords
,
976 lod_bias
, explicit_lod
,
981 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
986 for (i
= 0; i
< 5; i
++) {
989 if (pc
+ i
>= bld
->info
->num_instructions
)
992 opcode
= bld
->instructions
[pc
+ i
].Instruction
.Opcode
;
994 if (opcode
== TGSI_OPCODE_END
)
997 if (opcode
== TGSI_OPCODE_TEX
||
998 opcode
== TGSI_OPCODE_TXP
||
999 opcode
== TGSI_OPCODE_TXD
||
1000 opcode
== TGSI_OPCODE_TXB
||
1001 opcode
== TGSI_OPCODE_TXL
||
1002 opcode
== TGSI_OPCODE_TXF
||
1003 opcode
== TGSI_OPCODE_TXQ
||
1004 opcode
== TGSI_OPCODE_CAL
||
1005 opcode
== TGSI_OPCODE_CALLNZ
||
1006 opcode
== TGSI_OPCODE_IF
||
1007 opcode
== TGSI_OPCODE_IFC
||
1008 opcode
== TGSI_OPCODE_BGNLOOP
||
1009 opcode
== TGSI_OPCODE_SWITCH
)
1019 * Kill fragment if any of the src register values are negative.
1023 struct lp_build_tgsi_soa_context
*bld
,
1024 const struct tgsi_full_instruction
*inst
,
1027 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
1028 LLVMValueRef terms
[NUM_CHANNELS
];
1030 unsigned chan_index
;
1032 memset(&terms
, 0, sizeof terms
);
1034 FOR_EACH_CHANNEL( chan_index
) {
1037 /* Unswizzle channel */
1038 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
1040 /* Check if the component has not been already tested. */
1041 assert(swizzle
< NUM_CHANNELS
);
1042 if( !terms
[swizzle
] )
1043 /* TODO: change the comparison operator instead of setting the sign */
1044 terms
[swizzle
] = emit_fetch(bld
, inst
, 0, chan_index
);
1048 FOR_EACH_CHANNEL( chan_index
) {
1049 if(terms
[chan_index
]) {
1050 LLVMValueRef chan_mask
;
1053 * If term < 0 then mask = 0 else mask = ~0.
1055 chan_mask
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->base
.zero
);
1058 mask
= LLVMBuildAnd(bld
->base
.builder
, mask
, chan_mask
, "");
1065 lp_build_mask_update(bld
->mask
, mask
);
1067 if (!near_end_of_shader(bld
, pc
))
1068 lp_build_mask_check(bld
->mask
);
1074 * Predicated fragment kill.
1075 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1076 * The only predication is the execution mask which will apply if
1077 * we're inside a loop or conditional.
1080 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
1081 const struct tgsi_full_instruction
*inst
,
1086 /* For those channels which are "alive", disable fragment shader
1089 if (bld
->exec_mask
.has_mask
) {
1090 mask
= LLVMBuildNot(bld
->base
.builder
, bld
->exec_mask
.exec_mask
, "kilp");
1093 LLVMValueRef zero
= LLVMConstNull(bld
->base
.int_vec_type
);
1097 lp_build_mask_update(bld
->mask
, mask
);
1099 if (!near_end_of_shader(bld
, pc
))
1100 lp_build_mask_check(bld
->mask
);
1105 * Emit code which will dump the value of all the temporary registers
1109 emit_dump_temps(struct lp_build_tgsi_soa_context
*bld
)
1111 LLVMBuilderRef builder
= bld
->base
.builder
;
1112 LLVMValueRef temp_ptr
;
1113 LLVMValueRef i0
= lp_build_const_int32(0);
1114 LLVMValueRef i1
= lp_build_const_int32(1);
1115 LLVMValueRef i2
= lp_build_const_int32(2);
1116 LLVMValueRef i3
= lp_build_const_int32(3);
1118 int n
= bld
->info
->file_max
[TGSI_FILE_TEMPORARY
];
1120 for (index
= 0; index
< n
; index
++) {
1121 LLVMValueRef idx
= lp_build_const_int32(index
);
1122 LLVMValueRef v
[4][4], res
;
1125 lp_build_printf(builder
, "TEMP[%d]:\n", idx
);
1127 for (chan
= 0; chan
< 4; chan
++) {
1128 temp_ptr
= get_temp_ptr(bld
, index
, chan
);
1129 res
= LLVMBuildLoad(bld
->base
.builder
, temp_ptr
, "");
1130 v
[chan
][0] = LLVMBuildExtractElement(builder
, res
, i0
, "");
1131 v
[chan
][1] = LLVMBuildExtractElement(builder
, res
, i1
, "");
1132 v
[chan
][2] = LLVMBuildExtractElement(builder
, res
, i2
, "");
1133 v
[chan
][3] = LLVMBuildExtractElement(builder
, res
, i3
, "");
1136 lp_build_printf(builder
, " X: %f %f %f %f\n",
1137 v
[0][0], v
[0][1], v
[0][2], v
[0][3]);
1138 lp_build_printf(builder
, " Y: %f %f %f %f\n",
1139 v
[1][0], v
[1][1], v
[1][2], v
[1][3]);
1140 lp_build_printf(builder
, " Z: %f %f %f %f\n",
1141 v
[2][0], v
[2][1], v
[2][2], v
[2][3]);
1142 lp_build_printf(builder
, " W: %f %f %f %f\n",
1143 v
[3][0], v
[3][1], v
[3][2], v
[3][3]);
1151 struct lp_build_tgsi_soa_context
*bld
,
1152 const struct tgsi_full_declaration
*decl
)
1154 LLVMTypeRef vec_type
= bld
->base
.vec_type
;
1156 unsigned first
= decl
->Range
.First
;
1157 unsigned last
= decl
->Range
.Last
;
1160 for (idx
= first
; idx
<= last
; ++idx
) {
1161 assert(last
<= bld
->info
->file_max
[decl
->Declaration
.File
]);
1162 switch (decl
->Declaration
.File
) {
1163 case TGSI_FILE_TEMPORARY
:
1164 assert(idx
< LP_MAX_TGSI_TEMPS
);
1165 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
1166 LLVMValueRef array_size
= LLVMConstInt(LLVMInt32Type(),
1168 bld
->temps_array
= lp_build_array_alloca(bld
->base
.builder
,
1169 vec_type
, array_size
, "temparray");
1171 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1172 bld
->temps
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
1177 case TGSI_FILE_OUTPUT
:
1178 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1179 bld
->outputs
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
1180 vec_type
, "output");
1183 case TGSI_FILE_ADDRESS
:
1184 assert(idx
< LP_MAX_TGSI_ADDRS
);
1185 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1186 bld
->addr
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
1190 case TGSI_FILE_PREDICATE
:
1191 assert(idx
< LP_MAX_TGSI_PREDS
);
1192 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1193 bld
->preds
[idx
][i
] = lp_build_alloca(bld
->base
.builder
,
1194 vec_type
, "predicate");
1198 /* don't need to declare other vars */
1206 * Emit LLVM for one TGSI instruction.
1207 * \param return TRUE for success, FALSE otherwise
1211 struct lp_build_tgsi_soa_context
*bld
,
1212 const struct tgsi_full_instruction
*inst
,
1213 const struct tgsi_opcode_info
*info
,
1216 unsigned chan_index
;
1217 LLVMValueRef src0
, src1
, src2
;
1218 LLVMValueRef tmp0
, tmp1
, tmp2
;
1219 LLVMValueRef tmp3
= NULL
;
1220 LLVMValueRef tmp4
= NULL
;
1221 LLVMValueRef tmp5
= NULL
;
1222 LLVMValueRef tmp6
= NULL
;
1223 LLVMValueRef tmp7
= NULL
;
1225 LLVMValueRef dst0
[NUM_CHANNELS
];
1228 * Stores and write masks are handled in a general fashion after the long
1229 * instruction opcode switch statement.
1231 * Although not stricitly necessary, we avoid generating instructions for
1232 * channels which won't be stored, in cases where's that easy. For some
1233 * complex instructions, like texture sampling, it is more convenient to
1234 * assume a full writemask and then let LLVM optimization passes eliminate
1240 assert(info
->num_dst
<= 1);
1241 if (info
->num_dst
) {
1242 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1243 dst0
[chan_index
] = bld
->base
.undef
;
1247 switch (inst
->Instruction
.Opcode
) {
1248 case TGSI_OPCODE_ARL
:
1249 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1250 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1251 tmp0
= lp_build_floor(&bld
->base
, tmp0
);
1252 dst0
[chan_index
] = tmp0
;
1256 case TGSI_OPCODE_MOV
:
1257 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1258 dst0
[chan_index
] = emit_fetch( bld
, inst
, 0, chan_index
);
1262 case TGSI_OPCODE_LIT
:
1263 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ) {
1264 dst0
[CHAN_X
] = bld
->base
.one
;
1266 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1267 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1268 dst0
[CHAN_Y
] = lp_build_max( &bld
->base
, src0
, bld
->base
.zero
);
1270 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1271 /* XMM[1] = SrcReg[0].yyyy */
1272 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1273 /* XMM[1] = max(XMM[1], 0) */
1274 tmp1
= lp_build_max( &bld
->base
, tmp1
, bld
->base
.zero
);
1275 /* XMM[2] = SrcReg[0].wwww */
1276 tmp2
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1277 tmp1
= lp_build_pow( &bld
->base
, tmp1
, tmp2
);
1278 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1279 tmp2
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GREATER
, tmp0
, bld
->base
.zero
);
1280 dst0
[CHAN_Z
] = lp_build_select(&bld
->base
, tmp2
, tmp1
, bld
->base
.zero
);
1282 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) ) {
1283 dst0
[CHAN_W
] = bld
->base
.one
;
1287 case TGSI_OPCODE_RCP
:
1288 /* TGSI_OPCODE_RECIP */
1289 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1290 res
= lp_build_rcp(&bld
->base
, src0
);
1291 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1292 dst0
[chan_index
] = res
;
1296 case TGSI_OPCODE_RSQ
:
1297 /* TGSI_OPCODE_RECIPSQRT */
1298 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1299 src0
= lp_build_abs(&bld
->base
, src0
);
1300 res
= lp_build_rsqrt(&bld
->base
, src0
);
1301 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1302 dst0
[chan_index
] = res
;
1306 case TGSI_OPCODE_EXP
:
1307 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1308 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1309 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1310 LLVMValueRef
*p_exp2_int_part
= NULL
;
1311 LLVMValueRef
*p_frac_part
= NULL
;
1312 LLVMValueRef
*p_exp2
= NULL
;
1314 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1316 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1317 p_exp2_int_part
= &tmp0
;
1318 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1319 p_frac_part
= &tmp1
;
1320 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1323 lp_build_exp2_approx(&bld
->base
, src0
, p_exp2_int_part
, p_frac_part
, p_exp2
);
1325 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1326 dst0
[CHAN_X
] = tmp0
;
1327 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1328 dst0
[CHAN_Y
] = tmp1
;
1329 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1330 dst0
[CHAN_Z
] = tmp2
;
1333 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1334 dst0
[CHAN_W
] = bld
->base
.one
;
1338 case TGSI_OPCODE_LOG
:
1339 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1340 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1341 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
)) {
1342 LLVMValueRef
*p_floor_log2
= NULL
;
1343 LLVMValueRef
*p_exp
= NULL
;
1344 LLVMValueRef
*p_log2
= NULL
;
1346 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1347 src0
= lp_build_abs( &bld
->base
, src0
);
1349 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1350 p_floor_log2
= &tmp0
;
1351 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
))
1353 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1356 lp_build_log2_approx(&bld
->base
, src0
, p_exp
, p_floor_log2
, p_log2
);
1358 /* dst.x = floor(lg2(abs(src.x))) */
1359 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
))
1360 dst0
[CHAN_X
] = tmp0
;
1361 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1362 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
)) {
1363 dst0
[CHAN_Y
] = lp_build_div( &bld
->base
, src0
, tmp1
);
1365 /* dst.z = lg2(abs(src.x)) */
1366 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
))
1367 dst0
[CHAN_Z
] = tmp2
;
1370 if (IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
)) {
1371 dst0
[CHAN_W
] = bld
->base
.one
;
1375 case TGSI_OPCODE_MUL
:
1376 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1377 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1378 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1379 dst0
[chan_index
] = lp_build_mul(&bld
->base
, src0
, src1
);
1383 case TGSI_OPCODE_ADD
:
1384 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1385 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1386 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1387 dst0
[chan_index
] = lp_build_add(&bld
->base
, src0
, src1
);
1391 case TGSI_OPCODE_DP3
:
1392 /* TGSI_OPCODE_DOT3 */
1393 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1394 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1395 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1396 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1397 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1398 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1399 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1400 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1401 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1402 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1403 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1404 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1405 dst0
[chan_index
] = tmp0
;
1409 case TGSI_OPCODE_DP4
:
1410 /* TGSI_OPCODE_DOT4 */
1411 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1412 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1413 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1414 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1415 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1416 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1417 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1418 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1419 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1420 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1421 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1422 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_W
);
1423 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1424 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1425 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1426 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1427 dst0
[chan_index
] = tmp0
;
1431 case TGSI_OPCODE_DST
:
1432 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1433 dst0
[CHAN_X
] = bld
->base
.one
;
1435 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1436 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1437 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1438 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1440 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1441 dst0
[CHAN_Z
] = emit_fetch( bld
, inst
, 0, CHAN_Z
);
1443 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1444 dst0
[CHAN_W
] = emit_fetch( bld
, inst
, 1, CHAN_W
);
1448 case TGSI_OPCODE_MIN
:
1449 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1450 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1451 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1452 dst0
[chan_index
] = lp_build_min( &bld
->base
, src0
, src1
);
1456 case TGSI_OPCODE_MAX
:
1457 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1458 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1459 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1460 dst0
[chan_index
] = lp_build_max( &bld
->base
, src0
, src1
);
1464 case TGSI_OPCODE_SLT
:
1465 /* TGSI_OPCODE_SETLT */
1466 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1467 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1468 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1469 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, src1
);
1470 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1474 case TGSI_OPCODE_SGE
:
1475 /* TGSI_OPCODE_SETGE */
1476 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1477 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1478 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1479 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GEQUAL
, src0
, src1
);
1480 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1484 case TGSI_OPCODE_MAD
:
1485 /* TGSI_OPCODE_MADD */
1486 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1487 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1488 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1489 tmp2
= emit_fetch( bld
, inst
, 2, chan_index
);
1490 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1491 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp2
);
1492 dst0
[chan_index
] = tmp0
;
1496 case TGSI_OPCODE_SUB
:
1497 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1498 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1499 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1500 dst0
[chan_index
] = lp_build_sub( &bld
->base
, tmp0
, tmp1
);
1504 case TGSI_OPCODE_LRP
:
1505 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1506 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1507 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1508 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1509 tmp0
= lp_build_sub( &bld
->base
, src1
, src2
);
1510 tmp0
= lp_build_mul( &bld
->base
, src0
, tmp0
);
1511 dst0
[chan_index
] = lp_build_add( &bld
->base
, tmp0
, src2
);
1515 case TGSI_OPCODE_CND
:
1516 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1517 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1518 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1519 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1520 tmp1
= lp_build_const_vec(bld
->base
.type
, 0.5);
1521 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src2
, tmp1
);
1522 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src0
, src1
);
1526 case TGSI_OPCODE_DP2A
:
1527 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1528 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1529 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1530 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1531 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1532 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1533 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1534 tmp1
= emit_fetch( bld
, inst
, 2, CHAN_X
); /* xmm1 = src[2].x */
1535 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1536 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1537 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1541 case TGSI_OPCODE_FRC
:
1542 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1543 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1544 tmp0
= lp_build_floor(&bld
->base
, src0
);
1545 tmp0
= lp_build_sub(&bld
->base
, src0
, tmp0
);
1546 dst0
[chan_index
] = tmp0
;
1550 case TGSI_OPCODE_CLAMP
:
1551 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1552 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1553 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1554 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1555 tmp0
= lp_build_max(&bld
->base
, tmp0
, src1
);
1556 tmp0
= lp_build_min(&bld
->base
, tmp0
, src2
);
1557 dst0
[chan_index
] = tmp0
;
1561 case TGSI_OPCODE_FLR
:
1562 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1563 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1564 dst0
[chan_index
] = lp_build_floor(&bld
->base
, tmp0
);
1568 case TGSI_OPCODE_ROUND
:
1569 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1570 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1571 dst0
[chan_index
] = lp_build_round(&bld
->base
, tmp0
);
1575 case TGSI_OPCODE_EX2
: {
1576 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1577 tmp0
= lp_build_exp2( &bld
->base
, tmp0
);
1578 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1579 dst0
[chan_index
] = tmp0
;
1584 case TGSI_OPCODE_LG2
:
1585 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1586 tmp0
= lp_build_log2( &bld
->base
, tmp0
);
1587 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1588 dst0
[chan_index
] = tmp0
;
1592 case TGSI_OPCODE_POW
:
1593 src0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1594 src1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1595 res
= lp_build_pow( &bld
->base
, src0
, src1
);
1596 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1597 dst0
[chan_index
] = res
;
1601 case TGSI_OPCODE_XPD
:
1602 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1603 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ) {
1604 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1605 tmp3
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1607 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) ||
1608 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1609 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1610 tmp4
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1612 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1614 tmp2
= lp_build_mul( &bld
->base
, tmp2
, tmp1
);
1616 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1617 tmp2
= lp_build_sub( &bld
->base
, tmp2
, tmp5
);
1618 dst0
[CHAN_X
] = tmp2
;
1620 if( IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) ||
1621 IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) ) {
1622 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1623 tmp5
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1625 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1626 tmp3
= lp_build_mul( &bld
->base
, tmp3
, tmp2
);
1627 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp5
);
1628 tmp3
= lp_build_sub( &bld
->base
, tmp3
, tmp1
);
1629 dst0
[CHAN_Y
] = tmp3
;
1631 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1632 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1633 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp2
);
1634 tmp5
= lp_build_sub( &bld
->base
, tmp5
, tmp0
);
1635 dst0
[CHAN_Z
] = tmp5
;
1637 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1638 dst0
[CHAN_W
] = bld
->base
.one
;
1642 case TGSI_OPCODE_ABS
:
1643 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1644 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1645 dst0
[chan_index
] = lp_build_abs( &bld
->base
, tmp0
);
1649 case TGSI_OPCODE_RCC
:
1654 case TGSI_OPCODE_DPH
:
1655 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1656 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
);
1657 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1658 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
);
1659 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
);
1660 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1661 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1662 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Z
);
1663 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Z
);
1664 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1665 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1666 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_W
);
1667 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1668 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1669 dst0
[chan_index
] = tmp0
;
1673 case TGSI_OPCODE_COS
:
1674 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1675 tmp0
= lp_build_cos( &bld
->base
, tmp0
);
1676 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1677 dst0
[chan_index
] = tmp0
;
1681 case TGSI_OPCODE_DDX
:
1682 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1683 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, &dst0
[chan_index
], NULL
);
1687 case TGSI_OPCODE_DDY
:
1688 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1689 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, NULL
, &dst0
[chan_index
]);
1693 case TGSI_OPCODE_KILP
:
1694 /* predicated kill */
1695 emit_kilp( bld
, inst
, (*pc
)-1 );
1698 case TGSI_OPCODE_KIL
:
1699 /* conditional kill */
1700 emit_kil( bld
, inst
, (*pc
)-1 );
1703 case TGSI_OPCODE_PK2H
:
1707 case TGSI_OPCODE_PK2US
:
1711 case TGSI_OPCODE_PK4B
:
1715 case TGSI_OPCODE_PK4UB
:
1719 case TGSI_OPCODE_RFL
:
1723 case TGSI_OPCODE_SEQ
:
1724 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1725 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1726 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1727 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_EQUAL
, src0
, src1
);
1728 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1732 case TGSI_OPCODE_SFL
:
1733 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1734 dst0
[chan_index
] = bld
->base
.zero
;
1738 case TGSI_OPCODE_SGT
:
1739 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1740 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1741 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1742 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src0
, src1
);
1743 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1747 case TGSI_OPCODE_SIN
:
1748 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1749 tmp0
= lp_build_sin( &bld
->base
, tmp0
);
1750 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1751 dst0
[chan_index
] = tmp0
;
1755 case TGSI_OPCODE_SLE
:
1756 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1757 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1758 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1759 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LEQUAL
, src0
, src1
);
1760 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1764 case TGSI_OPCODE_SNE
:
1765 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1766 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1767 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1768 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_NOTEQUAL
, src0
, src1
);
1769 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1773 case TGSI_OPCODE_STR
:
1774 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1775 dst0
[chan_index
] = bld
->base
.one
;
1779 case TGSI_OPCODE_TEX
:
1780 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_NONE
, dst0
);
1783 case TGSI_OPCODE_TXD
:
1784 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
, dst0
);
1787 case TGSI_OPCODE_UP2H
:
1793 case TGSI_OPCODE_UP2US
:
1799 case TGSI_OPCODE_UP4B
:
1805 case TGSI_OPCODE_UP4UB
:
1811 case TGSI_OPCODE_X2D
:
1817 case TGSI_OPCODE_ARA
:
1823 case TGSI_OPCODE_ARR
:
1824 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1825 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1826 tmp0
= lp_build_round(&bld
->base
, tmp0
);
1827 dst0
[chan_index
] = tmp0
;
1831 case TGSI_OPCODE_BRA
:
1837 case TGSI_OPCODE_CAL
:
1838 lp_exec_mask_call(&bld
->exec_mask
,
1844 case TGSI_OPCODE_RET
:
1845 lp_exec_mask_ret(&bld
->exec_mask
, pc
);
1848 case TGSI_OPCODE_END
:
1851 emit_dump_temps(bld
);
1856 case TGSI_OPCODE_SSG
:
1857 /* TGSI_OPCODE_SGN */
1858 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1859 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1860 dst0
[chan_index
] = lp_build_sgn( &bld
->base
, tmp0
);
1864 case TGSI_OPCODE_CMP
:
1865 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1866 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1867 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1868 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1869 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, bld
->base
.zero
);
1870 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src1
, src2
);
1874 case TGSI_OPCODE_SCS
:
1875 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_X
) {
1876 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1877 dst0
[CHAN_X
] = lp_build_cos( &bld
->base
, tmp0
);
1879 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Y
) {
1880 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
);
1881 dst0
[CHAN_Y
] = lp_build_sin( &bld
->base
, tmp0
);
1883 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_Z
) {
1884 dst0
[CHAN_Z
] = bld
->base
.zero
;
1886 IF_IS_DST0_CHANNEL_ENABLED( inst
, CHAN_W
) {
1887 dst0
[CHAN_W
] = bld
->base
.one
;
1891 case TGSI_OPCODE_TXB
:
1892 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
, dst0
);
1895 case TGSI_OPCODE_NRM
:
1897 case TGSI_OPCODE_NRM4
:
1898 /* 3 or 4-component normalization */
1900 uint dims
= (inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
1902 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) ||
1903 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
) ||
1904 IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
) ||
1905 (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 4)) {
1907 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1910 /* xmm0 = src.x * src.x */
1911 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
1912 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1915 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp0
);
1918 /* xmm0 = xmm0 + src.y * src.y */
1919 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Y
);
1920 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1923 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1924 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1927 /* xmm0 = xmm0 + src.z * src.z */
1928 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_Z
);
1929 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1932 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1933 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1937 /* xmm0 = xmm0 + src.w * src.w */
1938 tmp1
= emit_fetch(bld
, inst
, 0, CHAN_W
);
1939 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
)) {
1942 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
1943 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1946 /* xmm1 = 1 / sqrt(xmm0) */
1947 tmp1
= lp_build_rsqrt( &bld
->base
, tmp0
);
1949 /* dst.x = xmm1 * src.x */
1950 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
)) {
1951 dst0
[CHAN_X
] = lp_build_mul( &bld
->base
, tmp4
, tmp1
);
1954 /* dst.y = xmm1 * src.y */
1955 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Y
)) {
1956 dst0
[CHAN_Y
] = lp_build_mul( &bld
->base
, tmp5
, tmp1
);
1959 /* dst.z = xmm1 * src.z */
1960 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_Z
)) {
1961 dst0
[CHAN_Z
] = lp_build_mul( &bld
->base
, tmp6
, tmp1
);
1964 /* dst.w = xmm1 * src.w */
1965 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_X
) && dims
== 4) {
1966 dst0
[CHAN_W
] = lp_build_mul( &bld
->base
, tmp7
, tmp1
);
1971 if (IS_DST0_CHANNEL_ENABLED(inst
, CHAN_W
) && dims
== 3) {
1972 dst0
[CHAN_W
] = bld
->base
.one
;
1977 case TGSI_OPCODE_DIV
:
1983 case TGSI_OPCODE_DP2
:
1984 tmp0
= emit_fetch( bld
, inst
, 0, CHAN_X
); /* xmm0 = src[0].x */
1985 tmp1
= emit_fetch( bld
, inst
, 1, CHAN_X
); /* xmm1 = src[1].x */
1986 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1987 tmp1
= emit_fetch( bld
, inst
, 0, CHAN_Y
); /* xmm1 = src[0].y */
1988 tmp2
= emit_fetch( bld
, inst
, 1, CHAN_Y
); /* xmm2 = src[1].y */
1989 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1990 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1991 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1992 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1996 case TGSI_OPCODE_TXL
:
1997 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
, dst0
);
2000 case TGSI_OPCODE_TXP
:
2001 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_PROJECTED
, dst0
);
2004 case TGSI_OPCODE_BRK
:
2005 lp_exec_break(&bld
->exec_mask
);
2008 case TGSI_OPCODE_IF
:
2009 tmp0
= emit_fetch(bld
, inst
, 0, CHAN_X
);
2010 tmp0
= lp_build_cmp(&bld
->base
, PIPE_FUNC_NOTEQUAL
,
2011 tmp0
, bld
->base
.zero
);
2012 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp0
);
2015 case TGSI_OPCODE_BGNLOOP
:
2016 lp_exec_bgnloop(&bld
->exec_mask
);
2019 case TGSI_OPCODE_BGNSUB
:
2020 lp_exec_mask_bgnsub(&bld
->exec_mask
);
2023 case TGSI_OPCODE_ELSE
:
2024 lp_exec_mask_cond_invert(&bld
->exec_mask
);
2027 case TGSI_OPCODE_ENDIF
:
2028 lp_exec_mask_cond_pop(&bld
->exec_mask
);
2031 case TGSI_OPCODE_ENDLOOP
:
2032 lp_exec_endloop(&bld
->exec_mask
);
2035 case TGSI_OPCODE_ENDSUB
:
2036 lp_exec_mask_endsub(&bld
->exec_mask
, pc
);
2039 case TGSI_OPCODE_PUSHA
:
2045 case TGSI_OPCODE_POPA
:
2051 case TGSI_OPCODE_CEIL
:
2052 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2053 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
2054 dst0
[chan_index
] = lp_build_ceil(&bld
->base
, tmp0
);
2058 case TGSI_OPCODE_I2F
:
2064 case TGSI_OPCODE_NOT
:
2070 case TGSI_OPCODE_TRUNC
:
2071 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2072 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
2073 dst0
[chan_index
] = lp_build_trunc(&bld
->base
, tmp0
);
2077 case TGSI_OPCODE_SHL
:
2083 case TGSI_OPCODE_ISHR
:
2089 case TGSI_OPCODE_AND
:
2095 case TGSI_OPCODE_OR
:
2101 case TGSI_OPCODE_MOD
:
2107 case TGSI_OPCODE_XOR
:
2113 case TGSI_OPCODE_SAD
:
2119 case TGSI_OPCODE_TXF
:
2125 case TGSI_OPCODE_TXQ
:
2131 case TGSI_OPCODE_CONT
:
2132 lp_exec_continue(&bld
->exec_mask
);
2135 case TGSI_OPCODE_EMIT
:
2139 case TGSI_OPCODE_ENDPRIM
:
2143 case TGSI_OPCODE_NOP
:
2151 LLVMValueRef pred
[NUM_CHANNELS
];
2153 emit_fetch_predicate( bld
, inst
, pred
);
2155 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2156 emit_store( bld
, inst
, 0, chan_index
, pred
[chan_index
], dst0
[chan_index
]);
2165 lp_build_tgsi_soa(LLVMBuilderRef builder
,
2166 const struct tgsi_token
*tokens
,
2167 struct lp_type type
,
2168 struct lp_build_mask_context
*mask
,
2169 LLVMValueRef consts_ptr
,
2170 const LLVMValueRef
*pos
,
2171 const LLVMValueRef (*inputs
)[NUM_CHANNELS
],
2172 LLVMValueRef (*outputs
)[NUM_CHANNELS
],
2173 struct lp_build_sampler_soa
*sampler
,
2174 const struct tgsi_shader_info
*info
)
2176 struct lp_build_tgsi_soa_context bld
;
2177 struct tgsi_parse_context parse
;
2178 uint num_immediates
= 0;
2179 uint num_instructions
= 0;
2183 struct lp_type res_type
;
2185 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
2186 memset(&res_type
, 0, sizeof res_type
);
2187 res_type
.width
= type
.width
;
2188 res_type
.length
= type
.length
;
2191 /* Setup build context */
2192 memset(&bld
, 0, sizeof bld
);
2193 lp_build_context_init(&bld
.base
, builder
, type
);
2194 lp_build_context_init(&bld
.uint_bld
, builder
, lp_uint_type(type
));
2197 bld
.inputs
= inputs
;
2198 bld
.outputs
= outputs
;
2199 bld
.consts_ptr
= consts_ptr
;
2200 bld
.sampler
= sampler
;
2202 bld
.indirect_files
= info
->indirect_files
;
2203 bld
.instructions
= (struct tgsi_full_instruction
*)
2204 MALLOC( LP_MAX_INSTRUCTIONS
* sizeof(struct tgsi_full_instruction
) );
2205 bld
.max_instructions
= LP_MAX_INSTRUCTIONS
;
2207 if (!bld
.instructions
) {
2211 lp_exec_mask_init(&bld
.exec_mask
, &bld
.base
);
2213 tgsi_parse_init( &parse
, tokens
);
2215 while( !tgsi_parse_end_of_tokens( &parse
) ) {
2216 tgsi_parse_token( &parse
);
2218 switch( parse
.FullToken
.Token
.Type
) {
2219 case TGSI_TOKEN_TYPE_DECLARATION
:
2220 /* Inputs already interpolated */
2221 emit_declaration( &bld
, &parse
.FullToken
.FullDeclaration
);
2224 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2226 /* save expanded instruction */
2227 if (num_instructions
== bld
.max_instructions
) {
2228 struct tgsi_full_instruction
*instructions
;
2229 instructions
= REALLOC(bld
.instructions
,
2230 bld
.max_instructions
2231 * sizeof(struct tgsi_full_instruction
),
2232 (bld
.max_instructions
+ LP_MAX_INSTRUCTIONS
)
2233 * sizeof(struct tgsi_full_instruction
));
2234 if (!instructions
) {
2237 bld
.instructions
= instructions
;
2238 bld
.max_instructions
+= LP_MAX_INSTRUCTIONS
;
2241 memcpy(bld
.instructions
+ num_instructions
,
2242 &parse
.FullToken
.FullInstruction
,
2243 sizeof(bld
.instructions
[0]));
2250 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2251 /* simply copy the immediate values into the next immediates[] slot */
2253 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
2255 assert(num_immediates
< LP_MAX_TGSI_IMMEDIATES
);
2256 for( i
= 0; i
< size
; ++i
)
2257 bld
.immediates
[num_immediates
][i
] =
2258 lp_build_const_vec(type
, parse
.FullToken
.FullImmediate
.u
[i
].Float
);
2259 for( i
= size
; i
< 4; ++i
)
2260 bld
.immediates
[num_immediates
][i
] = bld
.base
.undef
;
2265 case TGSI_TOKEN_TYPE_PROPERTY
:
2274 struct tgsi_full_instruction
*instr
= bld
.instructions
+ pc
;
2275 const struct tgsi_opcode_info
*opcode_info
=
2276 tgsi_get_opcode_info(instr
->Instruction
.Opcode
);
2277 if (!emit_instruction( &bld
, instr
, opcode_info
, &pc
))
2278 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2279 opcode_info
->mnemonic
);
2283 LLVMBasicBlockRef block
= LLVMGetInsertBlock(builder
);
2284 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
2285 debug_printf("11111111111111111111111111111 \n");
2286 tgsi_dump(tokens
, 0);
2287 lp_debug_dump_value(function
);
2288 debug_printf("2222222222222222222222222222 \n");
2290 tgsi_parse_free( &parse
);
2293 LLVMModuleRef module
= LLVMGetGlobalParent(
2294 LLVMGetBasicBlockParent(LLVMGetInsertBlock(bld
.base
.builder
)));
2295 LLVMDumpModule(module
);
2299 FREE( bld
.instructions
);