1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/**
 * @file
 * TGSI to LLVM IR translation -- SoA.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 *
 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
 * Brian Paul, and others.
 */
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_type.h"
51 #include "lp_bld_const.h"
52 #include "lp_bld_arit.h"
53 #include "lp_bld_bitarit.h"
54 #include "lp_bld_gather.h"
55 #include "lp_bld_init.h"
56 #include "lp_bld_logic.h"
57 #include "lp_bld_swizzle.h"
58 #include "lp_bld_flow.h"
59 #include "lp_bld_quad.h"
60 #include "lp_bld_tgsi.h"
61 #include "lp_bld_limits.h"
62 #include "lp_bld_debug.h"
63 #include "lp_bld_printf.h"
/* Helper macros for iterating over the 4 channels (x,y,z,w) of a TGSI
 * register and testing the destination write mask.
 */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

#define NUM_CHANNELS 4

#define LP_MAX_INSTRUCTIONS 256
85 struct lp_build_context
*bld
;
89 LLVMTypeRef int_vec_type
;
91 LLVMValueRef cond_stack
[LP_MAX_TGSI_NESTING
];
93 LLVMValueRef cond_mask
;
95 LLVMBasicBlockRef loop_block
;
96 LLVMValueRef cont_mask
;
97 LLVMValueRef break_mask
;
98 LLVMValueRef break_var
;
100 LLVMBasicBlockRef loop_block
;
101 LLVMValueRef cont_mask
;
102 LLVMValueRef break_mask
;
103 LLVMValueRef break_var
;
104 } loop_stack
[LP_MAX_TGSI_NESTING
];
107 LLVMValueRef ret_mask
;
110 LLVMValueRef ret_mask
;
111 } call_stack
[LP_MAX_TGSI_NESTING
];
114 LLVMValueRef exec_mask
;
117 struct lp_build_tgsi_soa_context
119 struct lp_build_context base
;
121 /* Builder for vector integer masks and indices */
122 struct lp_build_context uint_bld
;
124 /* Builder for scalar elements of shader's data type (float) */
125 struct lp_build_context elem_bld
;
127 LLVMValueRef consts_ptr
;
128 const LLVMValueRef
*pos
;
129 const LLVMValueRef (*inputs
)[NUM_CHANNELS
];
130 LLVMValueRef (*outputs
)[NUM_CHANNELS
];
132 const struct lp_build_sampler_soa
*sampler
;
134 LLVMValueRef immediates
[LP_MAX_TGSI_IMMEDIATES
][NUM_CHANNELS
];
135 LLVMValueRef temps
[LP_MAX_TGSI_TEMPS
][NUM_CHANNELS
];
136 LLVMValueRef addr
[LP_MAX_TGSI_ADDRS
][NUM_CHANNELS
];
137 LLVMValueRef preds
[LP_MAX_TGSI_PREDS
][NUM_CHANNELS
];
139 /* We allocate/use this array of temps if (1 << TGSI_FILE_TEMPORARY) is
140 * set in the indirect_files field.
141 * The temps[] array above is unused then.
143 LLVMValueRef temps_array
;
145 /* We allocate/use this array of output if (1 << TGSI_FILE_OUTPUT) is
146 * set in the indirect_files field.
147 * The outputs[] array above is unused then.
149 LLVMValueRef outputs_array
;
151 /* We allocate/use this array of inputs if (1 << TGSI_FILE_INPUT) is
152 * set in the indirect_files field.
153 * The inputs[] array above is unused then.
155 LLVMValueRef inputs_array
;
157 LLVMValueRef system_values_array
;
159 const struct tgsi_shader_info
*info
;
160 /** bitmask indicating which register files are accessed indirectly */
161 unsigned indirect_files
;
163 struct lp_build_mask_context
*mask
;
164 struct lp_exec_mask exec_mask
;
166 struct tgsi_full_instruction
*instructions
;
167 uint max_instructions
;
170 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
173 mask
->has_mask
= FALSE
;
174 mask
->cond_stack_size
= 0;
175 mask
->loop_stack_size
= 0;
176 mask
->call_stack_size
= 0;
178 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
179 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
= mask
->cond_mask
=
180 LLVMConstAllOnes(mask
->int_vec_type
);
183 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
185 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
187 if (mask
->loop_stack_size
) {
188 /*for loops we need to update the entire mask at runtime */
190 assert(mask
->break_mask
);
191 tmp
= LLVMBuildAnd(builder
,
195 mask
->exec_mask
= LLVMBuildAnd(builder
,
200 mask
->exec_mask
= mask
->cond_mask
;
202 if (mask
->call_stack_size
) {
203 mask
->exec_mask
= LLVMBuildAnd(builder
,
209 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
210 mask
->loop_stack_size
> 0 ||
211 mask
->call_stack_size
> 0);
214 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
217 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
219 assert(mask
->cond_stack_size
< LP_MAX_TGSI_NESTING
);
220 if (mask
->cond_stack_size
== 0) {
221 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
223 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
224 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
225 mask
->cond_mask
= LLVMBuildAnd(builder
,
229 lp_exec_mask_update(mask
);
232 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
234 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
235 LLVMValueRef prev_mask
;
236 LLVMValueRef inv_mask
;
238 assert(mask
->cond_stack_size
);
239 prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
240 if (mask
->cond_stack_size
== 1) {
241 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
244 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
246 mask
->cond_mask
= LLVMBuildAnd(builder
,
249 lp_exec_mask_update(mask
);
252 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
254 assert(mask
->cond_stack_size
);
255 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
256 lp_exec_mask_update(mask
);
259 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
261 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
263 if (mask
->loop_stack_size
== 0) {
264 assert(mask
->loop_block
== NULL
);
265 assert(mask
->cont_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
266 assert(mask
->break_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
267 assert(mask
->break_var
== NULL
);
270 assert(mask
->loop_stack_size
< LP_MAX_TGSI_NESTING
);
272 mask
->loop_stack
[mask
->loop_stack_size
].loop_block
= mask
->loop_block
;
273 mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
274 mask
->loop_stack
[mask
->loop_stack_size
].break_mask
= mask
->break_mask
;
275 mask
->loop_stack
[mask
->loop_stack_size
].break_var
= mask
->break_var
;
276 ++mask
->loop_stack_size
;
278 mask
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
279 LLVMBuildStore(builder
, mask
->break_mask
, mask
->break_var
);
281 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
282 LLVMBuildBr(builder
, mask
->loop_block
);
283 LLVMPositionBuilderAtEnd(builder
, mask
->loop_block
);
285 mask
->break_mask
= LLVMBuildLoad(builder
, mask
->break_var
, "");
287 lp_exec_mask_update(mask
);
290 static void lp_exec_break(struct lp_exec_mask
*mask
)
292 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
293 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
297 mask
->break_mask
= LLVMBuildAnd(builder
,
299 exec_mask
, "break_full");
301 lp_exec_mask_update(mask
);
304 static void lp_exec_continue(struct lp_exec_mask
*mask
)
306 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
307 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
311 mask
->cont_mask
= LLVMBuildAnd(builder
,
315 lp_exec_mask_update(mask
);
319 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
320 struct lp_exec_mask
*mask
)
322 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
323 LLVMBasicBlockRef endloop
;
324 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
325 mask
->bld
->type
.width
*
326 mask
->bld
->type
.length
);
329 assert(mask
->break_mask
);
332 * Restore the cont_mask, but don't pop
334 assert(mask
->loop_stack_size
);
335 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
- 1].cont_mask
;
336 lp_exec_mask_update(mask
);
339 * Unlike the continue mask, the break_mask must be preserved across loop
342 LLVMBuildStore(builder
, mask
->break_mask
, mask
->break_var
);
344 /* i1cond = (mask == 0) */
345 i1cond
= LLVMBuildICmp(
348 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
349 LLVMConstNull(reg_type
), "");
351 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
353 LLVMBuildCondBr(builder
,
354 i1cond
, mask
->loop_block
, endloop
);
356 LLVMPositionBuilderAtEnd(builder
, endloop
);
358 assert(mask
->loop_stack_size
);
359 --mask
->loop_stack_size
;
360 mask
->loop_block
= mask
->loop_stack
[mask
->loop_stack_size
].loop_block
;
361 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
;
362 mask
->break_mask
= mask
->loop_stack
[mask
->loop_stack_size
].break_mask
;
363 mask
->break_var
= mask
->loop_stack
[mask
->loop_stack_size
].break_var
;
365 lp_exec_mask_update(mask
);
368 /* stores val into an address pointed to by dst.
369 * mask->exec_mask is used to figure out which bits of val
370 * should be stored into the address
371 * (0 means don't store this bit, 1 means do store).
373 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
378 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
380 /* Mix the predicate and execution mask */
381 if (mask
->has_mask
) {
383 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
385 pred
= mask
->exec_mask
;
390 LLVMValueRef real_val
, dst_val
;
392 dst_val
= LLVMBuildLoad(builder
, dst
, "");
393 real_val
= lp_build_select(mask
->bld
,
397 LLVMBuildStore(builder
, real_val
, dst
);
399 LLVMBuildStore(builder
, val
, dst
);
402 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
406 assert(mask
->call_stack_size
< LP_MAX_TGSI_NESTING
);
407 mask
->call_stack
[mask
->call_stack_size
].pc
= *pc
;
408 mask
->call_stack
[mask
->call_stack_size
].ret_mask
= mask
->ret_mask
;
409 mask
->call_stack_size
++;
413 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
415 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
416 LLVMValueRef exec_mask
;
418 if (mask
->call_stack_size
== 0) {
419 /* returning from main() */
423 exec_mask
= LLVMBuildNot(builder
,
427 mask
->ret_mask
= LLVMBuildAnd(builder
,
429 exec_mask
, "ret_full");
431 lp_exec_mask_update(mask
);
/**
 * BGNSUB: nothing to do — call state was pushed by lp_exec_mask_call().
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
438 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
440 assert(mask
->call_stack_size
);
441 mask
->call_stack_size
--;
442 *pc
= mask
->call_stack
[mask
->call_stack_size
].pc
;
443 mask
->ret_mask
= mask
->call_stack
[mask
->call_stack_size
].ret_mask
;
444 lp_exec_mask_update(mask
);
449 * Return pointer to a temporary register channel (src or dest).
450 * Note that indirect addressing cannot be handled here.
451 * \param index which temporary register
452 * \param chan which channel of the temp register.
455 get_temp_ptr(struct lp_build_tgsi_soa_context
*bld
,
459 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
461 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
462 LLVMValueRef lindex
= lp_build_const_int32(bld
->base
.gallivm
, index
* 4 + chan
);
463 return LLVMBuildGEP(builder
, bld
->temps_array
, &lindex
, 1, "");
466 return bld
->temps
[index
][chan
];
471 * Return pointer to a output register channel (src or dest).
472 * Note that indirect addressing cannot be handled here.
473 * \param index which output register
474 * \param chan which channel of the output register.
477 get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
481 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
483 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
484 LLVMValueRef lindex
= lp_build_const_int32(bld
->base
.gallivm
,
486 return LLVMBuildGEP(builder
, bld
->outputs_array
, &lindex
, 1, "");
489 return bld
->outputs
[index
][chan
];
495 * XXX the lp_build_gather() function should be capable of doing this
496 * with a little work.
499 build_gather(struct lp_build_tgsi_soa_context
*bld
,
500 LLVMValueRef base_ptr
,
501 LLVMValueRef indexes
)
503 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
504 LLVMValueRef res
= bld
->base
.undef
;
508 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
510 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
511 LLVMValueRef ii
= lp_build_const_int32(bld
->base
.gallivm
, i
);
512 LLVMValueRef index
= LLVMBuildExtractElement(builder
,
514 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
515 &index
, 1, "gather_ptr");
516 LLVMValueRef scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
518 res
= LLVMBuildInsertElement(builder
, res
, scalar
, ii
, "");
526 * Scatter/store vector.
529 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
530 LLVMValueRef base_ptr
,
531 LLVMValueRef indexes
,
533 struct lp_exec_mask
*mask
,
536 struct gallivm_state
*gallivm
= bld
->base
.gallivm
;
537 LLVMBuilderRef builder
= gallivm
->builder
;
540 /* Mix the predicate and execution mask */
541 if (mask
->has_mask
) {
543 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
546 pred
= mask
->exec_mask
;
551 * Loop over elements of index_vec, store scalar value.
553 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
554 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
555 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
556 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
557 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
558 LLVMValueRef scalar_pred
= pred
?
559 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
562 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
563 ii
, val
, index
, scalar_ptr
);
566 LLVMValueRef real_val
, dst_val
;
567 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
568 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
569 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
572 LLVMBuildStore(builder
, val
, scalar_ptr
);
579 * Read the current value of the ADDR register, convert the floats to
580 * ints, add the base index and return the vector of offsets.
581 * The offsets will be used to index into the constant buffer or
582 * temporary register file.
585 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
586 unsigned reg_file
, unsigned reg_index
,
587 const struct tgsi_src_register
*indirect_reg
)
589 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
590 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
591 /* always use X component of address register */
592 unsigned swizzle
= indirect_reg
->SwizzleX
;
595 LLVMValueRef max_index
;
598 assert(bld
->indirect_files
& (1 << reg_file
));
600 base
= lp_build_const_int_vec(bld
->base
.gallivm
, uint_bld
->type
, reg_index
);
603 rel
= LLVMBuildLoad(builder
,
604 bld
->addr
[indirect_reg
->Index
][swizzle
],
607 /* for indexing we want integers */
608 rel
= LLVMBuildFPToSI(builder
,
610 uint_bld
->vec_type
, "");
612 index
= lp_build_add(uint_bld
, base
, rel
);
614 max_index
= lp_build_const_int_vec(bld
->base
.gallivm
,
616 bld
->info
->file_max
[reg_file
]);
618 assert(!uint_bld
->type
.sign
);
619 index
= lp_build_min(uint_bld
, index
, max_index
);
630 struct lp_build_tgsi_soa_context
*bld
,
631 const struct tgsi_full_instruction
*inst
,
633 const unsigned chan_index
)
635 struct gallivm_state
*gallivm
= bld
->base
.gallivm
;
636 LLVMBuilderRef builder
= gallivm
->builder
;
637 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
638 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
639 const unsigned swizzle
=
640 tgsi_util_get_full_src_register_swizzle(reg
, chan_index
);
642 LLVMValueRef indirect_index
= NULL
;
645 assert(0 && "invalid swizzle in emit_fetch()");
646 return bld
->base
.undef
;
649 if (reg
->Register
.Indirect
) {
650 indirect_index
= get_indirect_index(bld
,
655 assert(reg
->Register
.Index
<= bld
->info
->file_max
[reg
->Register
.File
]);
658 switch (reg
->Register
.File
) {
659 case TGSI_FILE_CONSTANT
:
660 if (reg
->Register
.Indirect
) {
661 LLVMValueRef swizzle_vec
=
662 lp_build_const_int_vec(bld
->base
.gallivm
, uint_bld
->type
, swizzle
);
663 LLVMValueRef index_vec
; /* index into the const buffer */
665 /* index_vec = indirect_index * 4 + swizzle */
666 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
667 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
669 /* Gather values from the constant buffer */
670 res
= build_gather(bld
, bld
->consts_ptr
, index_vec
);
673 LLVMValueRef index
; /* index into the const buffer */
674 LLVMValueRef scalar
, scalar_ptr
;
676 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
*4 + swizzle
);
678 scalar_ptr
= LLVMBuildGEP(builder
, bld
->consts_ptr
,
680 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
682 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
686 case TGSI_FILE_IMMEDIATE
:
687 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
691 case TGSI_FILE_INPUT
:
692 if (reg
->Register
.Indirect
) {
693 LLVMValueRef swizzle_vec
=
694 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
695 LLVMValueRef length_vec
=
696 lp_build_const_int_vec(gallivm
, uint_bld
->type
, bld
->base
.type
.length
);
697 LLVMValueRef index_vec
; /* index into the const buffer */
698 LLVMValueRef inputs_array
;
699 LLVMTypeRef float4_ptr_type
;
701 /* index_vec = (indirect_index * 4 + swizzle) * length */
702 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
703 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
704 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
706 /* cast inputs_array pointer to float* */
707 float4_ptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
708 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
,
709 float4_ptr_type
, "");
711 /* Gather values from the temporary register array */
712 res
= build_gather(bld
, inputs_array
, index_vec
);
714 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
715 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
716 reg
->Register
.Index
* 4 + swizzle
);
717 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
718 bld
->inputs_array
, &lindex
, 1, "");
719 res
= LLVMBuildLoad(builder
, input_ptr
, "");
722 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
728 case TGSI_FILE_TEMPORARY
:
729 if (reg
->Register
.Indirect
) {
730 LLVMValueRef swizzle_vec
=
731 lp_build_const_int_vec(bld
->base
.gallivm
, uint_bld
->type
, swizzle
);
732 LLVMValueRef length_vec
=
733 lp_build_const_int_vec(bld
->base
.gallivm
, uint_bld
->type
,
734 bld
->base
.type
.length
);
735 LLVMValueRef index_vec
; /* index into the const buffer */
736 LLVMValueRef temps_array
;
737 LLVMTypeRef float4_ptr_type
;
739 /* index_vec = (indirect_index * 4 + swizzle) * length */
740 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
741 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
742 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
744 /* cast temps_array pointer to float* */
745 float4_ptr_type
= LLVMPointerType(LLVMFloatTypeInContext(bld
->base
.gallivm
->context
), 0);
746 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
,
747 float4_ptr_type
, "");
749 /* Gather values from the temporary register array */
750 res
= build_gather(bld
, temps_array
, index_vec
);
753 LLVMValueRef temp_ptr
;
754 temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
, swizzle
);
755 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
757 return bld
->base
.undef
;
761 case TGSI_FILE_SYSTEM_VALUE
:
762 assert(!reg
->Register
.Indirect
);
764 LLVMValueRef index
; /* index into the system value array */
765 LLVMValueRef scalar
, scalar_ptr
;
767 index
= lp_build_const_int32(gallivm
,
768 reg
->Register
.Index
* 4 + swizzle
);
770 scalar_ptr
= LLVMBuildGEP(builder
, bld
->system_values_array
,
772 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
774 res
= lp_build_broadcast_scalar(&bld
->base
, scalar
);
779 assert(0 && "invalid src register in emit_fetch()");
780 return bld
->base
.undef
;
783 if (reg
->Register
.Absolute
) {
784 res
= lp_build_abs( &bld
->base
, res
);
787 if (reg
->Register
.Negate
) {
788 res
= lp_build_negate( &bld
->base
, res
);
796 * Register fetch with derivatives.
800 struct lp_build_tgsi_soa_context
*bld
,
801 const struct tgsi_full_instruction
*inst
,
803 const unsigned chan_index
,
810 src
= emit_fetch(bld
, inst
, index
, chan_index
);
815 /* TODO: use interpolation coeffs for inputs */
818 *ddx
= lp_build_ddx(&bld
->base
, src
);
821 *ddy
= lp_build_ddy(&bld
->base
, src
);
829 emit_fetch_predicate(
830 struct lp_build_tgsi_soa_context
*bld
,
831 const struct tgsi_full_instruction
*inst
,
834 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
836 unsigned char swizzles
[4];
837 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
841 if (!inst
->Instruction
.Predicate
) {
842 FOR_EACH_CHANNEL( chan
) {
848 swizzles
[0] = inst
->Predicate
.SwizzleX
;
849 swizzles
[1] = inst
->Predicate
.SwizzleY
;
850 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
851 swizzles
[3] = inst
->Predicate
.SwizzleW
;
853 index
= inst
->Predicate
.Index
;
854 assert(index
< LP_MAX_TGSI_PREDS
);
856 FOR_EACH_CHANNEL( chan
) {
857 unsigned swizzle
= swizzles
[chan
];
860 * Only fetch the predicate register channels that are actually listed
863 if (!unswizzled
[swizzle
]) {
864 value
= LLVMBuildLoad(builder
,
865 bld
->preds
[index
][swizzle
], "");
868 * Convert the value to an integer mask.
870 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
871 * is needlessly causing two comparisons due to storing the intermediate
872 * result as float vector instead of an integer mask vector.
874 value
= lp_build_compare(bld
->base
.gallivm
,
879 if (inst
->Predicate
.Negate
) {
880 value
= LLVMBuildNot(builder
, value
, "");
883 unswizzled
[swizzle
] = value
;
885 value
= unswizzled
[swizzle
];
898 struct lp_build_tgsi_soa_context
*bld
,
899 const struct tgsi_full_instruction
*inst
,
905 struct gallivm_state
*gallivm
= bld
->base
.gallivm
;
906 LLVMBuilderRef builder
= gallivm
->builder
;
907 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
908 struct lp_build_context
*uint_bld
= &bld
->uint_bld
;
909 LLVMValueRef indirect_index
= NULL
;
911 switch( inst
->Instruction
.Saturate
) {
915 case TGSI_SAT_ZERO_ONE
:
916 value
= lp_build_max(&bld
->base
, value
, bld
->base
.zero
);
917 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
920 case TGSI_SAT_MINUS_PLUS_ONE
:
921 value
= lp_build_max(&bld
->base
, value
, lp_build_const_vec(bld
->base
.gallivm
, bld
->base
.type
, -1.0));
922 value
= lp_build_min(&bld
->base
, value
, bld
->base
.one
);
929 if (reg
->Register
.Indirect
) {
930 indirect_index
= get_indirect_index(bld
,
935 assert(reg
->Register
.Index
<= bld
->info
->file_max
[reg
->Register
.File
]);
938 switch( reg
->Register
.File
) {
939 case TGSI_FILE_OUTPUT
:
940 if (reg
->Register
.Indirect
) {
941 LLVMValueRef chan_vec
=
942 lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
);
943 LLVMValueRef length_vec
=
944 lp_build_const_int_vec(gallivm
, uint_bld
->type
, bld
->base
.type
.length
);
945 LLVMValueRef index_vec
; /* indexes into the temp registers */
946 LLVMValueRef outputs_array
;
947 LLVMValueRef pixel_offsets
;
948 LLVMTypeRef float_ptr_type
;
951 /* build pixel offset vector: {0, 1, 2, 3, ...} */
952 pixel_offsets
= uint_bld
->undef
;
953 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
954 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
955 pixel_offsets
= LLVMBuildInsertElement(builder
, pixel_offsets
,
959 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
960 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
961 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
962 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
963 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
966 LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
967 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
,
970 /* Scatter store values into temp registers */
971 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
972 &bld
->exec_mask
, pred
);
975 LLVMValueRef out_ptr
= get_output_ptr(bld
, reg
->Register
.Index
,
977 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
, out_ptr
);
981 case TGSI_FILE_TEMPORARY
:
982 if (reg
->Register
.Indirect
) {
983 LLVMValueRef chan_vec
=
984 lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
);
985 LLVMValueRef length_vec
=
986 lp_build_const_int_vec(gallivm
, uint_bld
->type
,
987 bld
->base
.type
.length
);
988 LLVMValueRef index_vec
; /* indexes into the temp registers */
989 LLVMValueRef temps_array
;
990 LLVMValueRef pixel_offsets
;
991 LLVMTypeRef float_ptr_type
;
994 /* build pixel offset vector: {0, 1, 2, 3, ...} */
995 pixel_offsets
= uint_bld
->undef
;
996 for (i
= 0; i
< bld
->base
.type
.length
; i
++) {
997 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
998 pixel_offsets
= LLVMBuildInsertElement(builder
, pixel_offsets
,
1002 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1003 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1004 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1005 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1006 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1009 LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1010 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
,
1011 float_ptr_type
, "");
1013 /* Scatter store values into temp registers */
1014 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1015 &bld
->exec_mask
, pred
);
1018 LLVMValueRef temp_ptr
= get_temp_ptr(bld
, reg
->Register
.Index
,
1020 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
, temp_ptr
);
1024 case TGSI_FILE_ADDRESS
:
1025 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
1026 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1029 case TGSI_FILE_PREDICATE
:
1030 lp_exec_mask_store(&bld
->exec_mask
, pred
, value
,
1031 bld
->preds
[reg
->Register
.Index
][chan_index
]);
1041 * High-level instruction translators.
1045 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
1046 const struct tgsi_full_instruction
*inst
,
1047 enum lp_build_tex_modifier modifier
,
1048 LLVMValueRef
*texel
)
1050 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
1052 LLVMValueRef lod_bias
, explicit_lod
;
1053 LLVMValueRef oow
= NULL
;
1054 LLVMValueRef coords
[3];
1055 LLVMValueRef ddx
[3];
1056 LLVMValueRef ddy
[3];
1057 unsigned num_coords
;
1060 if (!bld
->sampler
) {
1061 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1062 for (i
= 0; i
< 4; i
++) {
1063 texel
[i
] = bld
->base
.undef
;
1068 switch (inst
->Texture
.Texture
) {
1069 case TGSI_TEXTURE_1D
:
1072 case TGSI_TEXTURE_1D_ARRAY
:
1073 case TGSI_TEXTURE_2D
:
1074 case TGSI_TEXTURE_RECT
:
1077 case TGSI_TEXTURE_SHADOW1D
:
1078 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1079 case TGSI_TEXTURE_SHADOW2D
:
1080 case TGSI_TEXTURE_SHADOWRECT
:
1081 case TGSI_TEXTURE_2D_ARRAY
:
1082 case TGSI_TEXTURE_3D
:
1083 case TGSI_TEXTURE_CUBE
:
1086 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1094 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
1095 lod_bias
= emit_fetch( bld
, inst
, 0, 3 );
1096 explicit_lod
= NULL
;
1098 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
1100 explicit_lod
= emit_fetch( bld
, inst
, 0, 3 );
1104 explicit_lod
= NULL
;
1107 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
1108 oow
= emit_fetch( bld
, inst
, 0, 3 );
1109 oow
= lp_build_rcp(&bld
->base
, oow
);
1112 for (i
= 0; i
< num_coords
; i
++) {
1113 coords
[i
] = emit_fetch( bld
, inst
, 0, i
);
1114 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
1115 coords
[i
] = lp_build_mul(&bld
->base
, coords
[i
], oow
);
1117 for (i
= num_coords
; i
< 3; i
++) {
1118 coords
[i
] = bld
->base
.undef
;
1121 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
1122 LLVMValueRef index0
= lp_build_const_int32(bld
->base
.gallivm
, 0);
1123 for (i
= 0; i
< num_coords
; i
++) {
1124 LLVMValueRef src1
= emit_fetch( bld
, inst
, 1, i
);
1125 LLVMValueRef src2
= emit_fetch( bld
, inst
, 2, i
);
1126 ddx
[i
] = LLVMBuildExtractElement(builder
, src1
, index0
, "");
1127 ddy
[i
] = LLVMBuildExtractElement(builder
, src2
, index0
, "");
1129 unit
= inst
->Src
[3].Register
.Index
;
1131 for (i
= 0; i
< num_coords
; i
++) {
1132 ddx
[i
] = lp_build_scalar_ddx( &bld
->base
, coords
[i
] );
1133 ddy
[i
] = lp_build_scalar_ddy( &bld
->base
, coords
[i
] );
1135 unit
= inst
->Src
[1].Register
.Index
;
1137 for (i
= num_coords
; i
< 3; i
++) {
1138 ddx
[i
] = LLVMGetUndef(bld
->base
.elem_type
);
1139 ddy
[i
] = LLVMGetUndef(bld
->base
.elem_type
);
1142 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
1145 unit
, num_coords
, coords
,
1147 lod_bias
, explicit_lod
,
1152 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
1157 for (i
= 0; i
< 5; i
++) {
1160 if (pc
+ i
>= bld
->info
->num_instructions
)
1163 opcode
= bld
->instructions
[pc
+ i
].Instruction
.Opcode
;
1165 if (opcode
== TGSI_OPCODE_END
)
1168 if (opcode
== TGSI_OPCODE_TEX
||
1169 opcode
== TGSI_OPCODE_TXP
||
1170 opcode
== TGSI_OPCODE_TXD
||
1171 opcode
== TGSI_OPCODE_TXB
||
1172 opcode
== TGSI_OPCODE_TXL
||
1173 opcode
== TGSI_OPCODE_TXF
||
1174 opcode
== TGSI_OPCODE_TXQ
||
1175 opcode
== TGSI_OPCODE_CAL
||
1176 opcode
== TGSI_OPCODE_CALLNZ
||
1177 opcode
== TGSI_OPCODE_IF
||
1178 opcode
== TGSI_OPCODE_IFC
||
1179 opcode
== TGSI_OPCODE_BGNLOOP
||
1180 opcode
== TGSI_OPCODE_SWITCH
)
1190 * Kill fragment if any of the src register values are negative.
1194 struct lp_build_tgsi_soa_context
*bld
,
1195 const struct tgsi_full_instruction
*inst
,
1198 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
1199 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
1200 LLVMValueRef terms
[NUM_CHANNELS
];
1202 unsigned chan_index
;
1204 memset(&terms
, 0, sizeof terms
);
1206 FOR_EACH_CHANNEL( chan_index
) {
1209 /* Unswizzle channel */
1210 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
1212 /* Check if the component has not been already tested. */
1213 assert(swizzle
< NUM_CHANNELS
);
1214 if( !terms
[swizzle
] )
1215 /* TODO: change the comparison operator instead of setting the sign */
1216 terms
[swizzle
] = emit_fetch(bld
, inst
, 0, chan_index
);
1220 FOR_EACH_CHANNEL( chan_index
) {
1221 if(terms
[chan_index
]) {
1222 LLVMValueRef chan_mask
;
1225 * If term < 0 then mask = 0 else mask = ~0.
1227 chan_mask
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->base
.zero
);
1230 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
1237 lp_build_mask_update(bld
->mask
, mask
);
1239 if (!near_end_of_shader(bld
, pc
))
1240 lp_build_mask_check(bld
->mask
);
1246 * Predicated fragment kill.
1247 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1248 * The only predication is the execution mask which will apply if
1249 * we're inside a loop or conditional.
1252 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
1253 const struct tgsi_full_instruction
*inst
,
1256 LLVMBuilderRef builder
= bld
->base
.gallivm
->builder
;
1259 /* For those channels which are "alive", disable fragment shader
1262 if (bld
->exec_mask
.has_mask
) {
1263 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
1266 LLVMValueRef zero
= LLVMConstNull(bld
->base
.int_vec_type
);
1270 lp_build_mask_update(bld
->mask
, mask
);
1272 if (!near_end_of_shader(bld
, pc
))
1273 lp_build_mask_check(bld
->mask
);
1278 * Emit code which will dump the value of all the temporary registers
1282 emit_dump_temps(struct lp_build_tgsi_soa_context
*bld
)
1284 struct gallivm_state
*gallivm
= bld
->base
.gallivm
;
1285 LLVMBuilderRef builder
= gallivm
->builder
;
1286 LLVMValueRef temp_ptr
;
1287 LLVMValueRef i0
= lp_build_const_int32(gallivm
, 0);
1288 LLVMValueRef i1
= lp_build_const_int32(gallivm
, 1);
1289 LLVMValueRef i2
= lp_build_const_int32(gallivm
, 2);
1290 LLVMValueRef i3
= lp_build_const_int32(gallivm
, 3);
1292 int n
= bld
->info
->file_max
[TGSI_FILE_TEMPORARY
];
1294 for (index
= 0; index
< n
; index
++) {
1295 LLVMValueRef idx
= lp_build_const_int32(gallivm
, index
);
1296 LLVMValueRef v
[4][4], res
;
1299 lp_build_printf(gallivm
, "TEMP[%d]:\n", idx
);
1301 for (chan
= 0; chan
< 4; chan
++) {
1302 temp_ptr
= get_temp_ptr(bld
, index
, chan
);
1303 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1304 v
[chan
][0] = LLVMBuildExtractElement(builder
, res
, i0
, "");
1305 v
[chan
][1] = LLVMBuildExtractElement(builder
, res
, i1
, "");
1306 v
[chan
][2] = LLVMBuildExtractElement(builder
, res
, i2
, "");
1307 v
[chan
][3] = LLVMBuildExtractElement(builder
, res
, i3
, "");
1310 lp_build_printf(gallivm
, " X: %f %f %f %f\n",
1311 v
[0][0], v
[0][1], v
[0][2], v
[0][3]);
1312 lp_build_printf(gallivm
, " Y: %f %f %f %f\n",
1313 v
[1][0], v
[1][1], v
[1][2], v
[1][3]);
1314 lp_build_printf(gallivm
, " Z: %f %f %f %f\n",
1315 v
[2][0], v
[2][1], v
[2][2], v
[2][3]);
1316 lp_build_printf(gallivm
, " W: %f %f %f %f\n",
1317 v
[3][0], v
[3][1], v
[3][2], v
[3][3]);
1325 struct lp_build_tgsi_soa_context
*bld
,
1326 const struct tgsi_full_declaration
*decl
)
1328 struct gallivm_state
*gallivm
= bld
->base
.gallivm
;
1329 LLVMTypeRef vec_type
= bld
->base
.vec_type
;
1330 const unsigned first
= decl
->Range
.First
;
1331 const unsigned last
= decl
->Range
.Last
;
1334 for (idx
= first
; idx
<= last
; ++idx
) {
1335 assert(last
<= bld
->info
->file_max
[decl
->Declaration
.File
]);
1336 switch (decl
->Declaration
.File
) {
1337 case TGSI_FILE_TEMPORARY
:
1338 assert(idx
< LP_MAX_TGSI_TEMPS
);
1339 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
1340 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1341 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
1345 case TGSI_FILE_OUTPUT
:
1346 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
1347 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1348 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
1349 vec_type
, "output");
1353 case TGSI_FILE_ADDRESS
:
1354 assert(idx
< LP_MAX_TGSI_ADDRS
);
1355 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1356 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "addr");
1359 case TGSI_FILE_PREDICATE
:
1360 assert(idx
< LP_MAX_TGSI_PREDS
);
1361 for (i
= 0; i
< NUM_CHANNELS
; i
++)
1362 bld
->preds
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
,
1367 /* don't need to declare other vars */
1375 * Emit LLVM for one TGSI instruction.
1376 * \param return TRUE for success, FALSE otherwise
1380 struct lp_build_tgsi_soa_context
*bld
,
1381 const struct tgsi_full_instruction
*inst
,
1382 const struct tgsi_opcode_info
*info
,
1385 unsigned chan_index
;
1386 LLVMValueRef src0
, src1
, src2
;
1387 LLVMValueRef tmp0
, tmp1
, tmp2
;
1388 LLVMValueRef tmp3
= NULL
;
1389 LLVMValueRef tmp4
= NULL
;
1390 LLVMValueRef tmp5
= NULL
;
1391 LLVMValueRef tmp6
= NULL
;
1392 LLVMValueRef tmp7
= NULL
;
1394 LLVMValueRef dst0
[NUM_CHANNELS
];
1397 * Stores and write masks are handled in a general fashion after the long
1398 * instruction opcode switch statement.
1400 * Although not stricitly necessary, we avoid generating instructions for
1401 * channels which won't be stored, in cases where's that easy. For some
1402 * complex instructions, like texture sampling, it is more convenient to
1403 * assume a full writemask and then let LLVM optimization passes eliminate
1409 assert(info
->num_dst
<= 1);
1410 if (info
->num_dst
) {
1411 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1412 dst0
[chan_index
] = bld
->base
.undef
;
1416 switch (inst
->Instruction
.Opcode
) {
1417 case TGSI_OPCODE_ARL
:
1418 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1419 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1420 tmp0
= lp_build_floor(&bld
->base
, tmp0
);
1421 dst0
[chan_index
] = tmp0
;
1425 case TGSI_OPCODE_MOV
:
1426 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1427 dst0
[chan_index
] = emit_fetch( bld
, inst
, 0, chan_index
);
1431 case TGSI_OPCODE_LIT
:
1432 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) ) {
1433 dst0
[TGSI_CHAN_X
] = bld
->base
.one
;
1435 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) ) {
1436 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1437 dst0
[TGSI_CHAN_Y
] = lp_build_max( &bld
->base
, src0
, bld
->base
.zero
);
1439 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) ) {
1440 /* XMM[1] = SrcReg[0].yyyy */
1441 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1442 /* XMM[1] = max(XMM[1], 0) */
1443 tmp1
= lp_build_max( &bld
->base
, tmp1
, bld
->base
.zero
);
1444 /* XMM[2] = SrcReg[0].wwww */
1445 tmp2
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_W
);
1446 tmp1
= lp_build_pow( &bld
->base
, tmp1
, tmp2
);
1447 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1448 tmp2
= lp_build_cmp(&bld
->base
, PIPE_FUNC_GREATER
, tmp0
, bld
->base
.zero
);
1449 dst0
[TGSI_CHAN_Z
] = lp_build_select(&bld
->base
, tmp2
, tmp1
, bld
->base
.zero
);
1451 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
) ) {
1452 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
1456 case TGSI_OPCODE_RCP
:
1457 /* TGSI_OPCODE_RECIP */
1458 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1459 res
= lp_build_rcp(&bld
->base
, src0
);
1460 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1461 dst0
[chan_index
] = res
;
1465 case TGSI_OPCODE_RSQ
:
1466 /* TGSI_OPCODE_RECIPSQRT */
1467 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1468 src0
= lp_build_abs(&bld
->base
, src0
);
1469 res
= lp_build_rsqrt(&bld
->base
, src0
);
1470 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1471 dst0
[chan_index
] = res
;
1475 case TGSI_OPCODE_EXP
:
1476 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) ||
1477 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) ||
1478 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
)) {
1479 LLVMValueRef
*p_exp2_int_part
= NULL
;
1480 LLVMValueRef
*p_frac_part
= NULL
;
1481 LLVMValueRef
*p_exp2
= NULL
;
1483 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1485 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
))
1486 p_exp2_int_part
= &tmp0
;
1487 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
))
1488 p_frac_part
= &tmp1
;
1489 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
))
1492 lp_build_exp2_approx(&bld
->base
, src0
, p_exp2_int_part
, p_frac_part
, p_exp2
);
1494 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
))
1495 dst0
[TGSI_CHAN_X
] = tmp0
;
1496 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
))
1497 dst0
[TGSI_CHAN_Y
] = tmp1
;
1498 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
))
1499 dst0
[TGSI_CHAN_Z
] = tmp2
;
1502 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
)) {
1503 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
1507 case TGSI_OPCODE_LOG
:
1508 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) ||
1509 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) ||
1510 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
)) {
1511 LLVMValueRef
*p_floor_log2
= NULL
;
1512 LLVMValueRef
*p_exp
= NULL
;
1513 LLVMValueRef
*p_log2
= NULL
;
1515 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1516 src0
= lp_build_abs( &bld
->base
, src0
);
1518 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
))
1519 p_floor_log2
= &tmp0
;
1520 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
))
1522 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
))
1525 lp_build_log2_approx(&bld
->base
, src0
, p_exp
, p_floor_log2
, p_log2
);
1527 /* dst.x = floor(lg2(abs(src.x))) */
1528 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
))
1529 dst0
[TGSI_CHAN_X
] = tmp0
;
1530 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1531 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
)) {
1532 dst0
[TGSI_CHAN_Y
] = lp_build_div( &bld
->base
, src0
, tmp1
);
1534 /* dst.z = lg2(abs(src.x)) */
1535 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
))
1536 dst0
[TGSI_CHAN_Z
] = tmp2
;
1539 if (IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
)) {
1540 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
1544 case TGSI_OPCODE_MUL
:
1545 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1546 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1547 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1548 dst0
[chan_index
] = lp_build_mul(&bld
->base
, src0
, src1
);
1552 case TGSI_OPCODE_ADD
:
1553 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1554 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1555 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1556 dst0
[chan_index
] = lp_build_add(&bld
->base
, src0
, src1
);
1560 case TGSI_OPCODE_DP3
:
1561 /* TGSI_OPCODE_DOT3 */
1562 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1563 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
);
1564 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1565 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1566 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
);
1567 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1568 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1569 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Z
);
1570 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Z
);
1571 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1572 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1573 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1574 dst0
[chan_index
] = tmp0
;
1578 case TGSI_OPCODE_DP4
:
1579 /* TGSI_OPCODE_DOT4 */
1580 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1581 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
);
1582 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1583 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1584 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
);
1585 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1586 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1587 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Z
);
1588 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Z
);
1589 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1590 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1591 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_W
);
1592 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_W
);
1593 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1594 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1595 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1596 dst0
[chan_index
] = tmp0
;
1600 case TGSI_OPCODE_DST
:
1601 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) {
1602 dst0
[TGSI_CHAN_X
] = bld
->base
.one
;
1604 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) {
1605 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1606 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
);
1607 dst0
[TGSI_CHAN_Y
] = lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1609 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) {
1610 dst0
[TGSI_CHAN_Z
] = emit_fetch( bld
, inst
, 0, TGSI_CHAN_Z
);
1612 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
) {
1613 dst0
[TGSI_CHAN_W
] = emit_fetch( bld
, inst
, 1, TGSI_CHAN_W
);
1617 case TGSI_OPCODE_MIN
:
1618 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1619 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1620 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1621 dst0
[chan_index
] = lp_build_min( &bld
->base
, src0
, src1
);
1625 case TGSI_OPCODE_MAX
:
1626 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1627 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1628 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1629 dst0
[chan_index
] = lp_build_max( &bld
->base
, src0
, src1
);
1633 case TGSI_OPCODE_SLT
:
1634 /* TGSI_OPCODE_SETLT */
1635 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1636 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1637 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1638 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, src1
);
1639 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1643 case TGSI_OPCODE_SGE
:
1644 /* TGSI_OPCODE_SETGE */
1645 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1646 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1647 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1648 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GEQUAL
, src0
, src1
);
1649 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1653 case TGSI_OPCODE_MAD
:
1654 /* TGSI_OPCODE_MADD */
1655 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1656 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1657 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1658 tmp2
= emit_fetch( bld
, inst
, 2, chan_index
);
1659 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1660 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp2
);
1661 dst0
[chan_index
] = tmp0
;
1665 case TGSI_OPCODE_SUB
:
1666 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1667 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1668 tmp1
= emit_fetch( bld
, inst
, 1, chan_index
);
1669 dst0
[chan_index
] = lp_build_sub( &bld
->base
, tmp0
, tmp1
);
1673 case TGSI_OPCODE_LRP
:
1674 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1675 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1676 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1677 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1678 tmp0
= lp_build_sub( &bld
->base
, src1
, src2
);
1679 tmp0
= lp_build_mul( &bld
->base
, src0
, tmp0
);
1680 dst0
[chan_index
] = lp_build_add( &bld
->base
, tmp0
, src2
);
1684 case TGSI_OPCODE_CND
:
1685 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1686 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1687 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1688 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1689 tmp1
= lp_build_const_vec(bld
->base
.gallivm
, bld
->base
.type
, 0.5);
1690 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src2
, tmp1
);
1691 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src0
, src1
);
1695 case TGSI_OPCODE_DP2A
:
1696 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
); /* xmm0 = src[0].x */
1697 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
); /* xmm1 = src[1].x */
1698 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
1699 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
); /* xmm1 = src[0].y */
1700 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
); /* xmm2 = src[1].y */
1701 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
1702 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1703 tmp1
= emit_fetch( bld
, inst
, 2, TGSI_CHAN_X
); /* xmm1 = src[2].x */
1704 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
1705 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1706 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
1710 case TGSI_OPCODE_FRC
:
1711 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1712 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1713 tmp0
= lp_build_floor(&bld
->base
, src0
);
1714 tmp0
= lp_build_sub(&bld
->base
, src0
, tmp0
);
1715 dst0
[chan_index
] = tmp0
;
1719 case TGSI_OPCODE_CLAMP
:
1720 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1721 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1722 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1723 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
1724 tmp0
= lp_build_max(&bld
->base
, tmp0
, src1
);
1725 tmp0
= lp_build_min(&bld
->base
, tmp0
, src2
);
1726 dst0
[chan_index
] = tmp0
;
1730 case TGSI_OPCODE_FLR
:
1731 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1732 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1733 dst0
[chan_index
] = lp_build_floor(&bld
->base
, tmp0
);
1737 case TGSI_OPCODE_ROUND
:
1738 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1739 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1740 dst0
[chan_index
] = lp_build_round(&bld
->base
, tmp0
);
1744 case TGSI_OPCODE_EX2
: {
1745 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1746 tmp0
= lp_build_exp2( &bld
->base
, tmp0
);
1747 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1748 dst0
[chan_index
] = tmp0
;
1753 case TGSI_OPCODE_LG2
:
1754 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1755 tmp0
= lp_build_log2( &bld
->base
, tmp0
);
1756 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1757 dst0
[chan_index
] = tmp0
;
1761 case TGSI_OPCODE_POW
:
1762 src0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1763 src1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
);
1764 res
= lp_build_pow( &bld
->base
, src0
, src1
);
1765 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1766 dst0
[chan_index
] = res
;
1770 case TGSI_OPCODE_XPD
:
1771 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) ||
1772 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) ) {
1773 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Z
);
1774 tmp3
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Z
);
1776 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) ||
1777 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) ) {
1778 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1779 tmp4
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
);
1781 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) {
1783 tmp2
= lp_build_mul( &bld
->base
, tmp2
, tmp1
);
1785 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1786 tmp2
= lp_build_sub( &bld
->base
, tmp2
, tmp5
);
1787 dst0
[TGSI_CHAN_X
] = tmp2
;
1789 if( IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) ||
1790 IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) ) {
1791 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
);
1792 tmp5
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1794 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) {
1795 tmp3
= lp_build_mul( &bld
->base
, tmp3
, tmp2
);
1796 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp5
);
1797 tmp3
= lp_build_sub( &bld
->base
, tmp3
, tmp1
);
1798 dst0
[TGSI_CHAN_Y
] = tmp3
;
1800 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) {
1801 tmp5
= lp_build_mul( &bld
->base
, tmp5
, tmp4
);
1802 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp2
);
1803 tmp5
= lp_build_sub( &bld
->base
, tmp5
, tmp0
);
1804 dst0
[TGSI_CHAN_Z
] = tmp5
;
1806 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
) {
1807 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
1811 case TGSI_OPCODE_ABS
:
1812 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1813 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1814 dst0
[chan_index
] = lp_build_abs( &bld
->base
, tmp0
);
1818 case TGSI_OPCODE_RCC
:
1823 case TGSI_OPCODE_DPH
:
1824 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1825 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
);
1826 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
);
1827 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
);
1828 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
);
1829 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1830 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1831 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Z
);
1832 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Z
);
1833 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
);
1834 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1835 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_W
);
1836 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
1837 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1838 dst0
[chan_index
] = tmp0
;
1842 case TGSI_OPCODE_COS
:
1843 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1844 tmp0
= lp_build_cos( &bld
->base
, tmp0
);
1845 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1846 dst0
[chan_index
] = tmp0
;
1850 case TGSI_OPCODE_DDX
:
1851 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1852 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, &dst0
[chan_index
], NULL
);
1856 case TGSI_OPCODE_DDY
:
1857 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1858 emit_fetch_deriv( bld
, inst
, 0, chan_index
, NULL
, NULL
, &dst0
[chan_index
]);
1862 case TGSI_OPCODE_KILP
:
1863 /* predicated kill */
1864 emit_kilp( bld
, inst
, (*pc
)-1 );
1867 case TGSI_OPCODE_KIL
:
1868 /* conditional kill */
1869 emit_kil( bld
, inst
, (*pc
)-1 );
1872 case TGSI_OPCODE_PK2H
:
1876 case TGSI_OPCODE_PK2US
:
1880 case TGSI_OPCODE_PK4B
:
1884 case TGSI_OPCODE_PK4UB
:
1888 case TGSI_OPCODE_RFL
:
1892 case TGSI_OPCODE_SEQ
:
1893 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1894 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1895 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1896 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_EQUAL
, src0
, src1
);
1897 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1901 case TGSI_OPCODE_SFL
:
1902 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1903 dst0
[chan_index
] = bld
->base
.zero
;
1907 case TGSI_OPCODE_SGT
:
1908 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1909 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1910 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1911 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_GREATER
, src0
, src1
);
1912 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1916 case TGSI_OPCODE_SIN
:
1917 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
1918 tmp0
= lp_build_sin( &bld
->base
, tmp0
);
1919 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1920 dst0
[chan_index
] = tmp0
;
1924 case TGSI_OPCODE_SLE
:
1925 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1926 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1927 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1928 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LEQUAL
, src0
, src1
);
1929 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1933 case TGSI_OPCODE_SNE
:
1934 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1935 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
1936 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
1937 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_NOTEQUAL
, src0
, src1
);
1938 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, bld
->base
.one
, bld
->base
.zero
);
1942 case TGSI_OPCODE_STR
:
1943 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1944 dst0
[chan_index
] = bld
->base
.one
;
1948 case TGSI_OPCODE_TEX
:
1949 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_NONE
, dst0
);
1952 case TGSI_OPCODE_TXD
:
1953 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
, dst0
);
1956 case TGSI_OPCODE_UP2H
:
1962 case TGSI_OPCODE_UP2US
:
1968 case TGSI_OPCODE_UP4B
:
1974 case TGSI_OPCODE_UP4UB
:
1980 case TGSI_OPCODE_X2D
:
1986 case TGSI_OPCODE_ARA
:
1992 case TGSI_OPCODE_ARR
:
1993 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1994 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
1995 tmp0
= lp_build_round(&bld
->base
, tmp0
);
1996 dst0
[chan_index
] = tmp0
;
2000 case TGSI_OPCODE_BRA
:
2006 case TGSI_OPCODE_CAL
:
2007 lp_exec_mask_call(&bld
->exec_mask
,
2013 case TGSI_OPCODE_RET
:
2014 lp_exec_mask_ret(&bld
->exec_mask
, pc
);
2017 case TGSI_OPCODE_END
:
2020 emit_dump_temps(bld
);
2025 case TGSI_OPCODE_SSG
:
2026 /* TGSI_OPCODE_SGN */
2027 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2028 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
2029 dst0
[chan_index
] = lp_build_sgn( &bld
->base
, tmp0
);
2033 case TGSI_OPCODE_CMP
:
2034 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2035 src0
= emit_fetch( bld
, inst
, 0, chan_index
);
2036 src1
= emit_fetch( bld
, inst
, 1, chan_index
);
2037 src2
= emit_fetch( bld
, inst
, 2, chan_index
);
2038 tmp0
= lp_build_cmp( &bld
->base
, PIPE_FUNC_LESS
, src0
, bld
->base
.zero
);
2039 dst0
[chan_index
] = lp_build_select( &bld
->base
, tmp0
, src1
, src2
);
2043 case TGSI_OPCODE_SCS
:
2044 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_X
) {
2045 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
2046 dst0
[TGSI_CHAN_X
] = lp_build_cos( &bld
->base
, tmp0
);
2048 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Y
) {
2049 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
);
2050 dst0
[TGSI_CHAN_Y
] = lp_build_sin( &bld
->base
, tmp0
);
2052 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_Z
) {
2053 dst0
[TGSI_CHAN_Z
] = bld
->base
.zero
;
2055 IF_IS_DST0_CHANNEL_ENABLED( inst
, TGSI_CHAN_W
) {
2056 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
2060 case TGSI_OPCODE_TXB
:
2061 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
, dst0
);
2064 case TGSI_OPCODE_NRM
:
2066 case TGSI_OPCODE_NRM4
:
2067 /* 3 or 4-component normalization */
2069 uint dims
= (inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
2071 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_X
) ||
2072 IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Y
) ||
2073 IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Z
) ||
2074 (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_W
) && dims
== 4)) {
2076 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
2079 /* xmm0 = src.x * src.x */
2080 tmp0
= emit_fetch(bld
, inst
, 0, TGSI_CHAN_X
);
2081 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_X
)) {
2084 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp0
);
2087 /* xmm0 = xmm0 + src.y * src.y */
2088 tmp1
= emit_fetch(bld
, inst
, 0, TGSI_CHAN_Y
);
2089 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Y
)) {
2092 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
2093 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
2096 /* xmm0 = xmm0 + src.z * src.z */
2097 tmp1
= emit_fetch(bld
, inst
, 0, TGSI_CHAN_Z
);
2098 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Z
)) {
2101 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
2102 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
2106 /* xmm0 = xmm0 + src.w * src.w */
2107 tmp1
= emit_fetch(bld
, inst
, 0, TGSI_CHAN_W
);
2108 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_W
)) {
2111 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp1
);
2112 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
);
2115 /* xmm1 = 1 / sqrt(xmm0) */
2116 tmp1
= lp_build_rsqrt( &bld
->base
, tmp0
);
2118 /* dst.x = xmm1 * src.x */
2119 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_X
)) {
2120 dst0
[TGSI_CHAN_X
] = lp_build_mul( &bld
->base
, tmp4
, tmp1
);
2123 /* dst.y = xmm1 * src.y */
2124 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Y
)) {
2125 dst0
[TGSI_CHAN_Y
] = lp_build_mul( &bld
->base
, tmp5
, tmp1
);
2128 /* dst.z = xmm1 * src.z */
2129 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_Z
)) {
2130 dst0
[TGSI_CHAN_Z
] = lp_build_mul( &bld
->base
, tmp6
, tmp1
);
2133 /* dst.w = xmm1 * src.w */
2134 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_X
) && dims
== 4) {
2135 dst0
[TGSI_CHAN_W
] = lp_build_mul( &bld
->base
, tmp7
, tmp1
);
2140 if (IS_DST0_CHANNEL_ENABLED(inst
, TGSI_CHAN_W
) && dims
== 3) {
2141 dst0
[TGSI_CHAN_W
] = bld
->base
.one
;
2146 case TGSI_OPCODE_DIV
:
2152 case TGSI_OPCODE_DP2
:
2153 tmp0
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_X
); /* xmm0 = src[0].x */
2154 tmp1
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_X
); /* xmm1 = src[1].x */
2155 tmp0
= lp_build_mul( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 * xmm1 */
2156 tmp1
= emit_fetch( bld
, inst
, 0, TGSI_CHAN_Y
); /* xmm1 = src[0].y */
2157 tmp2
= emit_fetch( bld
, inst
, 1, TGSI_CHAN_Y
); /* xmm2 = src[1].y */
2158 tmp1
= lp_build_mul( &bld
->base
, tmp1
, tmp2
); /* xmm1 = xmm1 * xmm2 */
2159 tmp0
= lp_build_add( &bld
->base
, tmp0
, tmp1
); /* xmm0 = xmm0 + xmm1 */
2160 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2161 dst0
[chan_index
] = tmp0
; /* dest[ch] = xmm0 */
2165 case TGSI_OPCODE_TXL
:
2166 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
, dst0
);
2169 case TGSI_OPCODE_TXP
:
2170 emit_tex( bld
, inst
, LP_BLD_TEX_MODIFIER_PROJECTED
, dst0
);
2173 case TGSI_OPCODE_BRK
:
2174 lp_exec_break(&bld
->exec_mask
);
2177 case TGSI_OPCODE_IF
:
2178 tmp0
= emit_fetch(bld
, inst
, 0, TGSI_CHAN_X
);
2179 tmp0
= lp_build_cmp(&bld
->base
, PIPE_FUNC_NOTEQUAL
,
2180 tmp0
, bld
->base
.zero
);
2181 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp0
);
2184 case TGSI_OPCODE_BGNLOOP
:
2185 lp_exec_bgnloop(&bld
->exec_mask
);
2188 case TGSI_OPCODE_BGNSUB
:
2189 lp_exec_mask_bgnsub(&bld
->exec_mask
);
2192 case TGSI_OPCODE_ELSE
:
2193 lp_exec_mask_cond_invert(&bld
->exec_mask
);
2196 case TGSI_OPCODE_ENDIF
:
2197 lp_exec_mask_cond_pop(&bld
->exec_mask
);
2200 case TGSI_OPCODE_ENDLOOP
:
2201 lp_exec_endloop(bld
->base
.gallivm
, &bld
->exec_mask
);
2204 case TGSI_OPCODE_ENDSUB
:
2205 lp_exec_mask_endsub(&bld
->exec_mask
, pc
);
2208 case TGSI_OPCODE_PUSHA
:
2214 case TGSI_OPCODE_POPA
:
2220 case TGSI_OPCODE_CEIL
:
2221 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2222 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
2223 dst0
[chan_index
] = lp_build_ceil(&bld
->base
, tmp0
);
2227 case TGSI_OPCODE_I2F
:
2233 case TGSI_OPCODE_NOT
:
2239 case TGSI_OPCODE_TRUNC
:
2240 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2241 tmp0
= emit_fetch( bld
, inst
, 0, chan_index
);
2242 dst0
[chan_index
] = lp_build_trunc(&bld
->base
, tmp0
);
2246 case TGSI_OPCODE_SHL
:
2252 case TGSI_OPCODE_ISHR
:
2258 case TGSI_OPCODE_AND
:
2264 case TGSI_OPCODE_OR
:
2270 case TGSI_OPCODE_MOD
:
2276 case TGSI_OPCODE_XOR
:
2282 case TGSI_OPCODE_SAD
:
2288 case TGSI_OPCODE_TXF
:
2294 case TGSI_OPCODE_TXQ
:
2300 case TGSI_OPCODE_CONT
:
2301 lp_exec_continue(&bld
->exec_mask
);
2304 case TGSI_OPCODE_EMIT
:
2308 case TGSI_OPCODE_ENDPRIM
:
2312 case TGSI_OPCODE_NOP
:
2320 LLVMValueRef pred
[NUM_CHANNELS
];
2322 emit_fetch_predicate( bld
, inst
, pred
);
2324 FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
2325 emit_store( bld
, inst
, 0, chan_index
, pred
[chan_index
], dst0
[chan_index
]);
2334 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
2335 const struct tgsi_token
*tokens
,
2336 struct lp_type type
,
2337 struct lp_build_mask_context
*mask
,
2338 LLVMValueRef consts_ptr
,
2339 LLVMValueRef system_values_array
,
2340 const LLVMValueRef
*pos
,
2341 const LLVMValueRef (*inputs
)[NUM_CHANNELS
],
2342 LLVMValueRef (*outputs
)[NUM_CHANNELS
],
2343 struct lp_build_sampler_soa
*sampler
,
2344 const struct tgsi_shader_info
*info
)
2346 struct lp_build_tgsi_soa_context bld
;
2347 struct tgsi_parse_context parse
;
2348 uint num_immediates
= 0;
2349 uint num_instructions
= 0;
2353 struct lp_type res_type
;
2355 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
2356 memset(&res_type
, 0, sizeof res_type
);
2357 res_type
.width
= type
.width
;
2358 res_type
.length
= type
.length
;
2361 /* Setup build context */
2362 memset(&bld
, 0, sizeof bld
);
2363 lp_build_context_init(&bld
.base
, gallivm
, type
);
2364 lp_build_context_init(&bld
.uint_bld
, gallivm
, lp_uint_type(type
));
2365 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
2368 bld
.inputs
= inputs
;
2369 bld
.outputs
= outputs
;
2370 bld
.consts_ptr
= consts_ptr
;
2371 bld
.sampler
= sampler
;
2373 bld
.indirect_files
= info
->indirect_files
;
2374 bld
.instructions
= (struct tgsi_full_instruction
*)
2375 MALLOC( LP_MAX_INSTRUCTIONS
* sizeof(struct tgsi_full_instruction
) );
2376 bld
.max_instructions
= LP_MAX_INSTRUCTIONS
;
2378 if (!bld
.instructions
) {
2382 lp_exec_mask_init(&bld
.exec_mask
, &bld
.base
);
2384 if (bld
.indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
2385 LLVMValueRef array_size
=
2386 lp_build_const_int32(gallivm
,
2387 info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4);
2388 bld
.temps_array
= lp_build_array_alloca(gallivm
,
2389 bld
.base
.vec_type
, array_size
,
2393 if (bld
.indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
2394 LLVMValueRef array_size
=
2395 lp_build_const_int32(gallivm
,
2396 info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
2397 bld
.outputs_array
= lp_build_array_alloca(gallivm
,
2398 bld
.base
.vec_type
, array_size
,
2402 /* If we have indirect addressing in inputs we need to copy them into
2403 * our alloca array to be able to iterate over them */
2404 if (bld
.indirect_files
& (1 << TGSI_FILE_INPUT
)) {
2405 unsigned index
, chan
;
2406 LLVMTypeRef vec_type
= bld
.base
.vec_type
;
2407 LLVMValueRef array_size
=
2408 lp_build_const_int32(gallivm
, info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
2409 bld
.inputs_array
= lp_build_array_alloca(gallivm
,
2410 vec_type
, array_size
,
2413 assert(info
->num_inputs
<= info
->file_max
[TGSI_FILE_INPUT
] + 1);
2415 for (index
= 0; index
< info
->num_inputs
; ++index
) {
2416 for (chan
= 0; chan
< NUM_CHANNELS
; ++chan
) {
2417 LLVMValueRef lindex
=
2418 lp_build_const_int32(gallivm
, index
* 4 + chan
);
2419 LLVMValueRef input_ptr
=
2420 LLVMBuildGEP(gallivm
->builder
, bld
.inputs_array
,
2422 LLVMValueRef value
= bld
.inputs
[index
][chan
];
2424 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
2429 bld
.system_values_array
= system_values_array
;
2431 tgsi_parse_init( &parse
, tokens
);
2433 while( !tgsi_parse_end_of_tokens( &parse
) ) {
2434 tgsi_parse_token( &parse
);
2436 switch( parse
.FullToken
.Token
.Type
) {
2437 case TGSI_TOKEN_TYPE_DECLARATION
:
2438 /* Inputs already interpolated */
2439 emit_declaration( &bld
, &parse
.FullToken
.FullDeclaration
);
2442 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2444 /* save expanded instruction */
2445 if (num_instructions
== bld
.max_instructions
) {
2446 struct tgsi_full_instruction
*instructions
;
2447 instructions
= REALLOC(bld
.instructions
,
2448 bld
.max_instructions
2449 * sizeof(struct tgsi_full_instruction
),
2450 (bld
.max_instructions
+ LP_MAX_INSTRUCTIONS
)
2451 * sizeof(struct tgsi_full_instruction
));
2452 if (!instructions
) {
2455 bld
.instructions
= instructions
;
2456 bld
.max_instructions
+= LP_MAX_INSTRUCTIONS
;
2459 memcpy(bld
.instructions
+ num_instructions
,
2460 &parse
.FullToken
.FullInstruction
,
2461 sizeof(bld
.instructions
[0]));
2468 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2469 /* simply copy the immediate values into the next immediates[] slot */
2471 const uint size
= parse
.FullToken
.FullImmediate
.Immediate
.NrTokens
- 1;
2473 assert(num_immediates
< LP_MAX_TGSI_IMMEDIATES
);
2474 for( i
= 0; i
< size
; ++i
)
2475 bld
.immediates
[num_immediates
][i
] =
2476 lp_build_const_vec(gallivm
, type
, parse
.FullToken
.FullImmediate
.u
[i
].Float
);
2477 for( i
= size
; i
< 4; ++i
)
2478 bld
.immediates
[num_immediates
][i
] = bld
.base
.undef
;
2483 case TGSI_TOKEN_TYPE_PROPERTY
:
2492 struct tgsi_full_instruction
*instr
= bld
.instructions
+ pc
;
2493 const struct tgsi_opcode_info
*opcode_info
=
2494 tgsi_get_opcode_info(instr
->Instruction
.Opcode
);
2495 if (!emit_instruction( &bld
, instr
, opcode_info
, &pc
))
2496 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2497 opcode_info
->mnemonic
);
2500 /* If we have indirect addressing in outputs we need to copy our alloca array
2501 * to the outputs slots specified by the called */
2502 if (bld
.indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
2503 unsigned index
, chan
;
2504 assert(info
->num_outputs
<= info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
2505 for (index
= 0; index
< info
->num_outputs
; ++index
) {
2506 for (chan
= 0; chan
< NUM_CHANNELS
; ++chan
) {
2507 bld
.outputs
[index
][chan
] = get_output_ptr(&bld
, index
, chan
);
2513 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
2514 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
2515 debug_printf("11111111111111111111111111111 \n");
2516 tgsi_dump(tokens
, 0);
2517 lp_debug_dump_value(function
);
2518 debug_printf("2222222222222222222222222222 \n");
2520 tgsi_parse_free( &parse
);
2523 LLVMModuleRef module
= LLVMGetGlobalParent(
2524 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
2525 LLVMDumpModule(module
);
2529 FREE( bld
.instructions
);
2534 * Build up the system values array out of individual values such as
2535 * the instance ID, front-face, primitive ID, etc. The shader info is
2536 * used to determine which system values are needed and where to put
2537 * them in the system values array.
2539 * XXX only instance ID is implemented at this time.
2541 * The system values register file is similar to the constants buffer.
2542 * Example declaration:
2543 * DCL SV[0], INSTANCEID
2544 * Example instruction:
2545 * MOV foo, SV[0].xxxx;
2547 * \return LLVM float array (interpreted as float [][4])
2550 lp_build_system_values_array(struct gallivm_state
*gallivm
,
2551 const struct tgsi_shader_info
*info
,
2552 LLVMValueRef instance_id
,
2553 LLVMValueRef facing
)
2555 LLVMValueRef size
= lp_build_const_int32(gallivm
, 4 * info
->num_system_values
);
2556 LLVMTypeRef float_t
= LLVMFloatTypeInContext(gallivm
->context
);
2557 LLVMValueRef array
= lp_build_array_alloca(gallivm
, float_t
,
2558 size
, "sysvals_array");
2561 for (i
= 0; i
< info
->num_system_values
; i
++) {
2562 LLVMValueRef index
= lp_build_const_int32(gallivm
, i
* 4);
2563 LLVMValueRef ptr
, value
= 0;
2565 switch (info
->system_value_semantic_name
[i
]) {
2566 case TGSI_SEMANTIC_INSTANCEID
:
2567 /* convert instance ID from int to float */
2568 value
= LLVMBuildSIToFP(gallivm
->builder
, instance_id
, float_t
,
2569 "sysval_instanceid");
2571 case TGSI_SEMANTIC_FACE
:
2574 assert(0 && "unexpected semantic in build_system_values_array()");
2577 ptr
= LLVMBuildGEP(gallivm
->builder
, array
, &index
, 1, "");
2578 LLVMBuildStore(gallivm
->builder
, value
, ptr
);