1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_tgsi_action.h"
51 #include "lp_bld_type.h"
52 #include "lp_bld_const.h"
53 #include "lp_bld_arit.h"
54 #include "lp_bld_bitarit.h"
55 #include "lp_bld_gather.h"
56 #include "lp_bld_init.h"
57 #include "lp_bld_logic.h"
58 #include "lp_bld_swizzle.h"
59 #include "lp_bld_flow.h"
60 #include "lp_bld_quad.h"
61 #include "lp_bld_tgsi.h"
62 #include "lp_bld_limits.h"
63 #include "lp_bld_debug.h"
64 #include "lp_bld_printf.h"
65 #include "lp_bld_sample.h"
68 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
70 LLVMTypeRef int_type
= LLVMInt32TypeInContext(bld
->gallivm
->context
);
71 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
74 mask
->has_mask
= FALSE
;
75 mask
->cond_stack_size
= 0;
76 mask
->loop_stack_size
= 0;
77 mask
->call_stack_size
= 0;
79 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
80 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
= mask
->cond_mask
=
81 LLVMConstAllOnes(mask
->int_vec_type
);
83 mask
->loop_limiter
= lp_build_alloca(bld
->gallivm
, int_type
, "looplimiter");
87 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
91 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
93 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
95 if (mask
->loop_stack_size
) {
96 /*for loops we need to update the entire mask at runtime */
98 assert(mask
->break_mask
);
99 tmp
= LLVMBuildAnd(builder
,
103 mask
->exec_mask
= LLVMBuildAnd(builder
,
108 mask
->exec_mask
= mask
->cond_mask
;
110 if (mask
->call_stack_size
) {
111 mask
->exec_mask
= LLVMBuildAnd(builder
,
117 mask
->has_mask
= (mask
->cond_stack_size
> 0 ||
118 mask
->loop_stack_size
> 0 ||
119 mask
->call_stack_size
> 0);
122 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
125 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
127 assert(mask
->cond_stack_size
< LP_MAX_TGSI_NESTING
);
128 if (mask
->cond_stack_size
== 0) {
129 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
131 mask
->cond_stack
[mask
->cond_stack_size
++] = mask
->cond_mask
;
132 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
133 mask
->cond_mask
= LLVMBuildAnd(builder
,
137 lp_exec_mask_update(mask
);
140 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
142 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
143 LLVMValueRef prev_mask
;
144 LLVMValueRef inv_mask
;
146 assert(mask
->cond_stack_size
);
147 prev_mask
= mask
->cond_stack
[mask
->cond_stack_size
- 1];
148 if (mask
->cond_stack_size
== 1) {
149 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
152 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
154 mask
->cond_mask
= LLVMBuildAnd(builder
,
157 lp_exec_mask_update(mask
);
160 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
162 assert(mask
->cond_stack_size
);
163 mask
->cond_mask
= mask
->cond_stack
[--mask
->cond_stack_size
];
164 lp_exec_mask_update(mask
);
167 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
169 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
171 if (mask
->loop_stack_size
== 0) {
172 assert(mask
->loop_block
== NULL
);
173 assert(mask
->cont_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
174 assert(mask
->break_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
175 assert(mask
->break_var
== NULL
);
178 assert(mask
->loop_stack_size
< LP_MAX_TGSI_NESTING
);
180 mask
->loop_stack
[mask
->loop_stack_size
].loop_block
= mask
->loop_block
;
181 mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
182 mask
->loop_stack
[mask
->loop_stack_size
].break_mask
= mask
->break_mask
;
183 mask
->loop_stack
[mask
->loop_stack_size
].break_var
= mask
->break_var
;
184 ++mask
->loop_stack_size
;
186 mask
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
187 LLVMBuildStore(builder
, mask
->break_mask
, mask
->break_var
);
189 mask
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
191 LLVMBuildBr(builder
, mask
->loop_block
);
192 LLVMPositionBuilderAtEnd(builder
, mask
->loop_block
);
194 mask
->break_mask
= LLVMBuildLoad(builder
, mask
->break_var
, "");
196 lp_exec_mask_update(mask
);
199 static void lp_exec_break(struct lp_exec_mask
*mask
)
201 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
202 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
206 mask
->break_mask
= LLVMBuildAnd(builder
,
208 exec_mask
, "break_full");
210 lp_exec_mask_update(mask
);
213 static void lp_exec_continue(struct lp_exec_mask
*mask
)
215 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
216 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
220 mask
->cont_mask
= LLVMBuildAnd(builder
,
224 lp_exec_mask_update(mask
);
228 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
229 struct lp_exec_mask
*mask
)
231 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
232 LLVMBasicBlockRef endloop
;
233 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
234 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
235 mask
->bld
->type
.width
*
236 mask
->bld
->type
.length
);
237 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
239 assert(mask
->break_mask
);
242 * Restore the cont_mask, but don't pop
244 assert(mask
->loop_stack_size
);
245 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
- 1].cont_mask
;
246 lp_exec_mask_update(mask
);
249 * Unlike the continue mask, the break_mask must be preserved across loop
252 LLVMBuildStore(builder
, mask
->break_mask
, mask
->break_var
);
254 /* Decrement the loop limiter */
255 limiter
= LLVMBuildLoad(builder
, mask
->loop_limiter
, "");
257 limiter
= LLVMBuildSub(
260 LLVMConstInt(int_type
, 1, false),
263 LLVMBuildStore(builder
, limiter
, mask
->loop_limiter
);
265 /* i1cond = (mask != 0) */
266 i1cond
= LLVMBuildICmp(
269 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
270 LLVMConstNull(reg_type
), "");
272 /* i2cond = (looplimiter > 0) */
273 i2cond
= LLVMBuildICmp(
277 LLVMConstNull(int_type
), "");
279 /* if( i1cond && i2cond ) */
280 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
282 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
284 LLVMBuildCondBr(builder
,
285 icond
, mask
->loop_block
, endloop
);
287 LLVMPositionBuilderAtEnd(builder
, endloop
);
289 assert(mask
->loop_stack_size
);
290 --mask
->loop_stack_size
;
291 mask
->loop_block
= mask
->loop_stack
[mask
->loop_stack_size
].loop_block
;
292 mask
->cont_mask
= mask
->loop_stack
[mask
->loop_stack_size
].cont_mask
;
293 mask
->break_mask
= mask
->loop_stack
[mask
->loop_stack_size
].break_mask
;
294 mask
->break_var
= mask
->loop_stack
[mask
->loop_stack_size
].break_var
;
296 lp_exec_mask_update(mask
);
299 /* stores val into an address pointed to by dst.
300 * mask->exec_mask is used to figure out which bits of val
301 * should be stored into the address
302 * (0 means don't store this bit, 1 means do store).
304 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
305 struct lp_build_context
*bld_store
,
310 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
312 /* Mix the predicate and execution mask */
313 if (mask
->has_mask
) {
315 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
317 pred
= mask
->exec_mask
;
322 LLVMValueRef real_val
, dst_val
;
324 dst_val
= LLVMBuildLoad(builder
, dst
, "");
325 real_val
= lp_build_select(bld_store
,
329 LLVMBuildStore(builder
, real_val
, dst
);
331 LLVMBuildStore(builder
, val
, dst
);
334 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
338 assert(mask
->call_stack_size
< LP_MAX_TGSI_NESTING
);
339 mask
->call_stack
[mask
->call_stack_size
].pc
= *pc
;
340 mask
->call_stack
[mask
->call_stack_size
].ret_mask
= mask
->ret_mask
;
341 mask
->call_stack_size
++;
345 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
347 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
348 LLVMValueRef exec_mask
;
350 if (mask
->call_stack_size
== 0) {
351 /* returning from main() */
355 exec_mask
= LLVMBuildNot(builder
,
359 mask
->ret_mask
= LLVMBuildAnd(builder
,
361 exec_mask
, "ret_full");
363 lp_exec_mask_update(mask
);
366 static void lp_exec_mask_bgnsub(struct lp_exec_mask
*mask
)
370 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
372 assert(mask
->call_stack_size
);
373 mask
->call_stack_size
--;
374 *pc
= mask
->call_stack
[mask
->call_stack_size
].pc
;
375 mask
->ret_mask
= mask
->call_stack
[mask
->call_stack_size
].ret_mask
;
376 lp_exec_mask_update(mask
);
381 * Return pointer to a temporary register channel (src or dest).
382 * Note that indirect addressing cannot be handled here.
383 * \param index which temporary register
384 * \param chan which channel of the temp register.
387 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
391 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
393 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
394 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
395 return LLVMBuildGEP(builder
, bld
->temps_array
, &lindex
, 1, "");
398 return bld
->temps
[index
][chan
];
403 * Return pointer to a output register channel (src or dest).
404 * Note that indirect addressing cannot be handled here.
405 * \param index which output register
406 * \param chan which channel of the output register.
409 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
413 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
415 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
416 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
,
418 return LLVMBuildGEP(builder
, bld
->outputs_array
, &lindex
, 1, "");
421 return bld
->outputs
[index
][chan
];
427 * XXX the lp_build_gather() function should be capable of doing this
428 * with a little work.
431 build_gather(struct lp_build_context
*bld
,
432 LLVMValueRef base_ptr
,
433 LLVMValueRef indexes
)
435 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
436 LLVMValueRef res
= bld
->undef
;
440 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
442 for (i
= 0; i
< bld
->type
.length
; i
++) {
443 LLVMValueRef ii
= lp_build_const_int32(bld
->gallivm
, i
);
444 LLVMValueRef index
= LLVMBuildExtractElement(builder
,
446 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
447 &index
, 1, "gather_ptr");
448 LLVMValueRef scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
450 res
= LLVMBuildInsertElement(builder
, res
, scalar
, ii
, "");
458 * Scatter/store vector.
461 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
462 LLVMValueRef base_ptr
,
463 LLVMValueRef indexes
,
465 struct lp_exec_mask
*mask
,
468 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
469 LLVMBuilderRef builder
= gallivm
->builder
;
472 /* Mix the predicate and execution mask */
473 if (mask
->has_mask
) {
475 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
478 pred
= mask
->exec_mask
;
483 * Loop over elements of index_vec, store scalar value.
485 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
486 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
487 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
488 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
489 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
490 LLVMValueRef scalar_pred
= pred
?
491 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
494 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
495 ii
, val
, index
, scalar_ptr
);
498 LLVMValueRef real_val
, dst_val
;
499 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
500 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
501 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
504 LLVMBuildStore(builder
, val
, scalar_ptr
);
511 * Read the current value of the ADDR register, convert the floats to
512 * ints, add the base index and return the vector of offsets.
513 * The offsets will be used to index into the constant buffer or
514 * temporary register file.
517 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
518 unsigned reg_file
, unsigned reg_index
,
519 const struct tgsi_src_register
*indirect_reg
)
521 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
522 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
523 /* always use X component of address register */
524 unsigned swizzle
= indirect_reg
->SwizzleX
;
527 LLVMValueRef max_index
;
530 assert(bld
->indirect_files
& (1 << reg_file
));
532 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
535 rel
= LLVMBuildLoad(builder
,
536 bld
->addr
[indirect_reg
->Index
][swizzle
],
539 index
= lp_build_add(uint_bld
, base
, rel
);
541 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
543 bld
->bld_base
.info
->file_max
[reg_file
]);
545 assert(!uint_bld
->type
.sign
);
546 index
= lp_build_min(uint_bld
, index
, max_index
);
551 static struct lp_build_context
*
552 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
553 enum tgsi_opcode_type stype
)
555 struct lp_build_context
*bld_fetch
;
558 case TGSI_TYPE_FLOAT
:
559 case TGSI_TYPE_UNTYPED
:
560 bld_fetch
= &bld_base
->base
;
562 case TGSI_TYPE_UNSIGNED
:
563 bld_fetch
= &bld_base
->uint_bld
;
565 case TGSI_TYPE_SIGNED
:
566 bld_fetch
= &bld_base
->int_bld
;
569 case TGSI_TYPE_DOUBLE
:
580 struct lp_build_tgsi_context
* bld_base
,
581 const struct tgsi_full_src_register
* reg
,
582 enum tgsi_opcode_type stype
,
585 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
586 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
587 LLVMBuilderRef builder
= gallivm
->builder
;
588 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
589 LLVMValueRef indirect_index
= NULL
;
590 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
592 /* XXX: Handle fetching xyzw components as a vector */
593 assert(swizzle
!= ~0);
595 if (reg
->Register
.Indirect
) {
596 indirect_index
= get_indirect_index(bld
,
602 if (reg
->Register
.Indirect
) {
603 LLVMValueRef swizzle_vec
=
604 lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, swizzle
);
605 LLVMValueRef index_vec
; /* index into the const buffer */
607 /* index_vec = indirect_index * 4 + swizzle */
608 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
609 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
611 /* Gather values from the constant buffer */
612 return build_gather(bld_fetch
, bld
->consts_ptr
, index_vec
);
615 LLVMValueRef index
; /* index into the const buffer */
616 LLVMValueRef scalar
, scalar_ptr
;
618 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
*4 + swizzle
);
620 scalar_ptr
= LLVMBuildGEP(builder
, bld
->consts_ptr
,
623 if (stype
!= TGSI_TYPE_FLOAT
&& stype
!= TGSI_TYPE_UNTYPED
) {
624 LLVMTypeRef ivtype
= LLVMPointerType(LLVMInt32TypeInContext(gallivm
->context
), 0);
625 LLVMValueRef temp_ptr
;
626 temp_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, ivtype
, "");
627 scalar
= LLVMBuildLoad(builder
, temp_ptr
, "");
629 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
631 return lp_build_broadcast_scalar(bld_fetch
, scalar
);
636 emit_fetch_immediate(
637 struct lp_build_tgsi_context
* bld_base
,
638 const struct tgsi_full_src_register
* reg
,
639 enum tgsi_opcode_type stype
,
642 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
643 LLVMValueRef res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
646 if (stype
== TGSI_TYPE_UNSIGNED
) {
647 res
= LLVMConstBitCast(res
, bld_base
->uint_bld
.vec_type
);
648 } else if (stype
== TGSI_TYPE_SIGNED
) {
649 res
= LLVMConstBitCast(res
, bld_base
->int_bld
.vec_type
);
656 struct lp_build_tgsi_context
* bld_base
,
657 const struct tgsi_full_src_register
* reg
,
658 enum tgsi_opcode_type stype
,
661 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
662 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
663 LLVMBuilderRef builder
= gallivm
->builder
;
664 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
665 LLVMValueRef indirect_index
= NULL
;
668 if (reg
->Register
.Indirect
) {
669 indirect_index
= get_indirect_index(bld
,
675 if (reg
->Register
.Indirect
) {
676 LLVMValueRef swizzle_vec
=
677 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
678 LLVMValueRef length_vec
=
679 lp_build_const_int_vec(gallivm
, uint_bld
->type
, bld
->bld_base
.base
.type
.length
);
680 LLVMValueRef index_vec
; /* index into the const buffer */
681 LLVMValueRef inputs_array
;
682 LLVMTypeRef float4_ptr_type
;
684 /* index_vec = (indirect_index * 4 + swizzle) * length */
685 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
686 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
687 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
689 /* cast inputs_array pointer to float* */
690 float4_ptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
691 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
,
692 float4_ptr_type
, "");
694 /* Gather values from the temporary register array */
695 res
= build_gather(&bld_base
->base
, inputs_array
, index_vec
);
697 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
698 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
699 reg
->Register
.Index
* 4 + swizzle
);
700 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
701 bld
->inputs_array
, &lindex
, 1, "");
702 res
= LLVMBuildLoad(builder
, input_ptr
, "");
705 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
711 if (stype
== TGSI_TYPE_UNSIGNED
) {
712 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
713 } else if (stype
== TGSI_TYPE_SIGNED
) {
714 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
721 emit_fetch_temporary(
722 struct lp_build_tgsi_context
* bld_base
,
723 const struct tgsi_full_src_register
* reg
,
724 enum tgsi_opcode_type stype
,
727 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
728 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
729 LLVMBuilderRef builder
= gallivm
->builder
;
730 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
731 LLVMValueRef indirect_index
= NULL
;
734 if (reg
->Register
.Indirect
) {
735 indirect_index
= get_indirect_index(bld
,
741 if (reg
->Register
.Indirect
) {
742 LLVMValueRef swizzle_vec
=
743 lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, swizzle
);
744 LLVMValueRef length_vec
=
745 lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
,
746 bld
->bld_base
.base
.type
.length
);
747 LLVMValueRef index_vec
; /* index into the const buffer */
748 LLVMValueRef temps_array
;
749 LLVMTypeRef float4_ptr_type
;
751 /* index_vec = (indirect_index * 4 + swizzle) * length */
752 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
753 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
754 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
756 /* cast temps_array pointer to float* */
757 float4_ptr_type
= LLVMPointerType(LLVMFloatTypeInContext(bld
->bld_base
.base
.gallivm
->context
), 0);
758 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
,
759 float4_ptr_type
, "");
761 /* Gather values from the temporary register array */
762 res
= build_gather(&bld_base
->base
, temps_array
, index_vec
);
765 LLVMValueRef temp_ptr
;
766 if (stype
!= TGSI_TYPE_FLOAT
&& stype
!= TGSI_TYPE_UNTYPED
) {
767 LLVMTypeRef itype
= LLVMPointerType(bld
->bld_base
.int_bld
.vec_type
, 0);
768 LLVMValueRef tint_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
,
770 temp_ptr
= LLVMBuildBitCast(builder
, tint_ptr
, itype
, "");
772 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
773 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
775 return bld
->bld_base
.base
.undef
;
782 emit_fetch_system_value(
783 struct lp_build_tgsi_context
* bld_base
,
784 const struct tgsi_full_src_register
* reg
,
785 enum tgsi_opcode_type stype
,
788 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
789 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
790 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
791 LLVMBuilderRef builder
= gallivm
->builder
;
793 enum tgsi_opcode_type atype
; // Actual type of the value
795 assert(!reg
->Register
.Indirect
);
797 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
798 case TGSI_SEMANTIC_INSTANCEID
:
799 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
800 atype
= TGSI_TYPE_UNSIGNED
;
803 case TGSI_SEMANTIC_VERTEXID
:
804 res
= bld
->system_values
.vertex_id
;
805 atype
= TGSI_TYPE_UNSIGNED
;
809 assert(!"unexpected semantic in emit_fetch_system_value");
810 res
= bld_base
->base
.zero
;
811 atype
= TGSI_TYPE_FLOAT
;
815 if (atype
!= stype
) {
816 if (stype
== TGSI_TYPE_FLOAT
) {
817 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
818 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
819 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
820 } else if (stype
== TGSI_TYPE_SIGNED
) {
821 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
829 * Register fetch with derivatives.
833 struct lp_build_tgsi_soa_context
*bld
,
842 /* TODO: use interpolation coeffs for inputs */
845 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
848 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
856 emit_fetch_predicate(
857 struct lp_build_tgsi_soa_context
*bld
,
858 const struct tgsi_full_instruction
*inst
,
861 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
863 unsigned char swizzles
[4];
864 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
868 if (!inst
->Instruction
.Predicate
) {
869 TGSI_FOR_EACH_CHANNEL( chan
) {
875 swizzles
[0] = inst
->Predicate
.SwizzleX
;
876 swizzles
[1] = inst
->Predicate
.SwizzleY
;
877 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
878 swizzles
[3] = inst
->Predicate
.SwizzleW
;
880 index
= inst
->Predicate
.Index
;
881 assert(index
< LP_MAX_TGSI_PREDS
);
883 TGSI_FOR_EACH_CHANNEL( chan
) {
884 unsigned swizzle
= swizzles
[chan
];
887 * Only fetch the predicate register channels that are actually listed
890 if (!unswizzled
[swizzle
]) {
891 value
= LLVMBuildLoad(builder
,
892 bld
->preds
[index
][swizzle
], "");
895 * Convert the value to an integer mask.
897 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
898 * is needlessly causing two comparisons due to storing the intermediate
899 * result as float vector instead of an integer mask vector.
901 value
= lp_build_compare(bld
->bld_base
.base
.gallivm
,
902 bld
->bld_base
.base
.type
,
905 bld
->bld_base
.base
.zero
);
906 if (inst
->Predicate
.Negate
) {
907 value
= LLVMBuildNot(builder
, value
, "");
910 unswizzled
[swizzle
] = value
;
912 value
= unswizzled
[swizzle
];
924 struct lp_build_tgsi_context
*bld_base
,
925 const struct tgsi_full_instruction
*inst
,
931 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
932 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
933 LLVMBuilderRef builder
= gallivm
->builder
;
934 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
935 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
936 LLVMValueRef indirect_index
= NULL
;
937 struct lp_build_context
*bld_store
;
938 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
);
942 case TGSI_TYPE_FLOAT
:
943 case TGSI_TYPE_UNTYPED
:
944 bld_store
= &bld_base
->base
;
946 case TGSI_TYPE_UNSIGNED
:
947 bld_store
= &bld_base
->uint_bld
;
949 case TGSI_TYPE_SIGNED
:
950 bld_store
= &bld_base
->int_bld
;
952 case TGSI_TYPE_DOUBLE
:
959 switch( inst
->Instruction
.Saturate
) {
963 case TGSI_SAT_ZERO_ONE
:
964 value
= lp_build_max(&bld
->bld_base
.base
, value
, bld
->bld_base
.base
.zero
);
965 value
= lp_build_min(&bld
->bld_base
.base
, value
, bld
->bld_base
.base
.one
);
968 case TGSI_SAT_MINUS_PLUS_ONE
:
969 value
= lp_build_max(&bld
->bld_base
.base
, value
, lp_build_const_vec(bld
->bld_base
.base
.gallivm
, bld
->bld_base
.base
.type
, -1.0));
970 value
= lp_build_min(&bld
->bld_base
.base
, value
, bld
->bld_base
.base
.one
);
977 if (reg
->Register
.Indirect
) {
978 indirect_index
= get_indirect_index(bld
,
983 assert(reg
->Register
.Index
<=
984 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
987 switch( reg
->Register
.File
) {
988 case TGSI_FILE_OUTPUT
:
989 if (reg
->Register
.Indirect
) {
990 LLVMValueRef chan_vec
=
991 lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
);
992 LLVMValueRef length_vec
=
993 lp_build_const_int_vec(gallivm
, uint_bld
->type
, bld
->bld_base
.base
.type
.length
);
994 LLVMValueRef index_vec
; /* indexes into the temp registers */
995 LLVMValueRef outputs_array
;
996 LLVMValueRef pixel_offsets
;
997 LLVMTypeRef float_ptr_type
;
1000 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1001 pixel_offsets
= uint_bld
->undef
;
1002 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1003 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1004 pixel_offsets
= LLVMBuildInsertElement(builder
, pixel_offsets
,
1008 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1009 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1010 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1011 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1012 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1015 LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1016 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
,
1017 float_ptr_type
, "");
1019 /* Scatter store values into temp registers */
1020 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1021 &bld
->exec_mask
, pred
);
1024 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1026 lp_exec_mask_store(&bld
->exec_mask
, bld_store
, pred
, value
, out_ptr
);
1030 case TGSI_FILE_TEMPORARY
:
1031 if (reg
->Register
.Indirect
) {
1032 LLVMValueRef chan_vec
=
1033 lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
);
1034 LLVMValueRef length_vec
=
1035 lp_build_const_int_vec(gallivm
, uint_bld
->type
,
1036 bld
->bld_base
.base
.type
.length
);
1037 LLVMValueRef index_vec
; /* indexes into the temp registers */
1038 LLVMValueRef temps_array
;
1039 LLVMValueRef pixel_offsets
;
1040 LLVMTypeRef float_ptr_type
;
1043 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1044 pixel_offsets
= uint_bld
->undef
;
1045 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1046 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1047 pixel_offsets
= LLVMBuildInsertElement(builder
, pixel_offsets
,
1051 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1052 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1053 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1054 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1055 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1058 LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1059 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
,
1060 float_ptr_type
, "");
1062 /* Scatter store values into temp registers */
1063 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1064 &bld
->exec_mask
, pred
);
1067 LLVMValueRef temp_ptr
;
1070 case TGSI_TYPE_UNSIGNED
:
1071 case TGSI_TYPE_SIGNED
: {
1072 LLVMTypeRef itype
= bld_base
->int_bld
.vec_type
;
1073 LLVMTypeRef ivtype
= LLVMPointerType(itype
, 0);
1074 LLVMValueRef tint_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
,
1076 LLVMValueRef temp_value_ptr
;
1078 temp_ptr
= LLVMBuildBitCast(builder
, tint_ptr
, ivtype
, "");
1079 temp_value_ptr
= LLVMBuildBitCast(builder
, value
, itype
, "");
1080 value
= temp_value_ptr
;
1084 case TGSI_TYPE_FLOAT
:
1085 case TGSI_TYPE_UNTYPED
:
1086 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
,
1091 lp_exec_mask_store(&bld
->exec_mask
, bld_store
, pred
, value
, temp_ptr
);
1095 case TGSI_FILE_ADDRESS
:
1096 assert(dtype
== TGSI_TYPE_SIGNED
);
1097 assert(LLVMTypeOf(value
) == bld_base
->base
.int_vec_type
);
1098 lp_exec_mask_store(&bld
->exec_mask
, bld_store
, pred
, value
,
1099 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1102 case TGSI_FILE_PREDICATE
:
1103 lp_exec_mask_store(&bld
->exec_mask
, bld_store
, pred
, value
,
1104 bld
->preds
[reg
->Register
.Index
][chan_index
]);
1114 struct lp_build_tgsi_context
* bld_base
,
1115 const struct tgsi_full_instruction
* inst
,
1116 const struct tgsi_opcode_info
* info
,
1117 LLVMValueRef dst
[4])
1120 unsigned chan_index
;
1121 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1124 LLVMValueRef pred
[TGSI_NUM_CHANNELS
];
1126 emit_fetch_predicate( bld
, inst
, pred
);
1128 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1129 emit_store_chan(bld_base
, inst
, 0, chan_index
, pred
[chan_index
], dst
[chan_index
]);
1135 * High-level instruction translators.
1139 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
1140 const struct tgsi_full_instruction
*inst
,
1141 enum lp_build_tex_modifier modifier
,
1142 LLVMValueRef
*texel
)
1144 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1145 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1147 LLVMValueRef lod_bias
, explicit_lod
;
1148 LLVMValueRef oow
= NULL
;
1149 LLVMValueRef coords
[4];
1150 LLVMValueRef offsets
[3] = { NULL
};
1151 struct lp_derivatives derivs
;
1152 unsigned num_coords
;
1156 if (!bld
->sampler
) {
1157 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1158 for (i
= 0; i
< 4; i
++) {
1159 texel
[i
] = bld
->bld_base
.base
.undef
;
1164 derivs
.ddx_ddy
[0] = bld
->bld_base
.base
.undef
;
1165 derivs
.ddx_ddy
[1] = bld
->bld_base
.base
.undef
;
1167 switch (inst
->Texture
.Texture
) {
1168 case TGSI_TEXTURE_1D
:
1172 case TGSI_TEXTURE_1D_ARRAY
:
1176 case TGSI_TEXTURE_2D
:
1177 case TGSI_TEXTURE_RECT
:
1181 case TGSI_TEXTURE_SHADOW1D
:
1182 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1186 case TGSI_TEXTURE_SHADOW2D
:
1187 case TGSI_TEXTURE_SHADOWRECT
:
1188 case TGSI_TEXTURE_2D_ARRAY
:
1189 case TGSI_TEXTURE_CUBE
:
1193 case TGSI_TEXTURE_3D
:
1197 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1198 case TGSI_TEXTURE_SHADOWCUBE
:
1207 /* Note lod and especially projected are illegal in a LOT of cases */
1208 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
1209 assert(num_coords
< 4);
1210 lod_bias
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, 3 );
1211 explicit_lod
= NULL
;
1213 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
1214 assert(num_coords
< 4);
1216 explicit_lod
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, 3 );
1220 explicit_lod
= NULL
;
1223 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
1224 assert(num_coords
< 4);
1225 oow
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, 3 );
1226 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
1229 for (i
= 0; i
< num_coords
; i
++) {
1230 coords
[i
] = lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, i
);
1231 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
1232 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
1234 for (i
= num_coords
; i
< 4; i
++) {
1235 coords
[i
] = bld
->bld_base
.base
.undef
;
1238 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
1239 LLVMValueRef i32undef
= LLVMGetUndef(LLVMInt32TypeInContext(gallivm
->context
));
1240 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
];
1241 LLVMValueRef ddxdyonec
[3];
1242 unsigned length
= bld
->bld_base
.base
.type
.length
;
1243 unsigned num_quads
= length
/ 4;
1247 for (dim
= 0; dim
< dims
; ++dim
) {
1248 LLVMValueRef srcx
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 1, dim
);
1249 LLVMValueRef srcy
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 2, dim
);
1250 for (quad
= 0; quad
< num_quads
; ++quad
) {
1251 unsigned s1
= 4*quad
;
1252 unsigned s2
= 4*quad
+ length
;
1253 shuffles
[4*quad
+ 0] = lp_build_const_int32(gallivm
, s1
);
1254 shuffles
[4*quad
+ 1] = lp_build_const_int32(gallivm
, s2
);
1255 shuffles
[4*quad
+ 2] = i32undef
;
1256 shuffles
[4*quad
+ 3] = i32undef
;
1258 ddxdyonec
[dim
] = LLVMBuildShuffleVector(builder
, srcx
, srcy
,
1259 LLVMConstVector(shuffles
, length
), "");
1262 derivs
.ddx_ddy
[0] = ddxdyonec
[0];
1264 else if (dims
>= 2) {
1265 for (quad
= 0; quad
< num_quads
; ++quad
) {
1266 unsigned s1
= 4*quad
;
1267 unsigned s2
= 4*quad
+ length
;
1268 shuffles
[4*quad
+ 0] = lp_build_const_int32(gallivm
, s1
);
1269 shuffles
[4*quad
+ 1] = lp_build_const_int32(gallivm
, s1
+ 1);
1270 shuffles
[4*quad
+ 2] = lp_build_const_int32(gallivm
, s2
);
1271 shuffles
[4*quad
+ 3] = lp_build_const_int32(gallivm
, s2
+ 1);
1273 derivs
.ddx_ddy
[0] = LLVMBuildShuffleVector(builder
, ddxdyonec
[0], ddxdyonec
[1],
1274 LLVMConstVector(shuffles
, length
), "");
1276 derivs
.ddx_ddy
[1] = ddxdyonec
[2];
1279 unit
= inst
->Src
[3].Register
.Index
;
1282 derivs
.ddx_ddy
[0] = lp_build_packed_ddx_ddy_onecoord(&bld
->bld_base
.base
, coords
[0]);
1284 else if (dims
>= 2) {
1285 derivs
.ddx_ddy
[0] = lp_build_packed_ddx_ddy_twocoord(&bld
->bld_base
.base
,
1286 coords
[0], coords
[1]);
1288 derivs
.ddx_ddy
[1] = lp_build_packed_ddx_ddy_onecoord(&bld
->bld_base
.base
, coords
[2]);
1291 unit
= inst
->Src
[1].Register
.Index
;
1294 /* some advanced gather instructions (txgo) would require 4 offsets */
1295 if (inst
->Texture
.NumOffsets
== 1) {
1297 for (dim
= 0; dim
< dims
; dim
++) {
1298 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
1302 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
1303 bld
->bld_base
.base
.gallivm
,
1304 bld
->bld_base
.base
.type
,
1309 lod_bias
, explicit_lod
,
1314 emit_txf( struct lp_build_tgsi_soa_context
*bld
,
1315 const struct tgsi_full_instruction
*inst
,
1316 LLVMValueRef
*texel
)
1319 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
1320 LLVMValueRef explicit_lod
= NULL
;
1321 LLVMValueRef coords
[3];
1322 LLVMValueRef offsets
[3] = { NULL
};
1323 struct lp_derivatives derivs
;
1324 unsigned num_coords
;
1328 if (!bld
->sampler
) {
1329 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1330 for (i
= 0; i
< 4; i
++) {
1331 texel
[i
] = coord_undef
;
1336 derivs
.ddx_ddy
[0] = coord_undef
;
1337 derivs
.ddx_ddy
[1] = coord_undef
;
1339 switch (inst
->Texture
.Texture
) {
1340 case TGSI_TEXTURE_1D
:
1341 case TGSI_TEXTURE_BUFFER
:
1345 case TGSI_TEXTURE_1D_ARRAY
:
1349 case TGSI_TEXTURE_2D
:
1350 case TGSI_TEXTURE_RECT
:
1354 case TGSI_TEXTURE_2D_ARRAY
:
1358 case TGSI_TEXTURE_3D
:
1367 /* always have lod except for buffers ? */
1368 if (inst
->Texture
.Texture
!= TGSI_TEXTURE_BUFFER
) {
1369 explicit_lod
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, 3 );
1372 for (i
= 0; i
< num_coords
; i
++) {
1373 coords
[i
] = lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, i
);
1375 for (i
= num_coords
; i
< 3; i
++) {
1376 coords
[i
] = coord_undef
;
1379 unit
= inst
->Src
[1].Register
.Index
;
1381 if (inst
->Texture
.NumOffsets
== 1) {
1383 for (dim
= 0; dim
< dims
; dim
++) {
1384 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
1388 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
1389 bld
->bld_base
.base
.gallivm
,
1390 bld
->bld_base
.base
.type
,
1400 emit_txq( struct lp_build_tgsi_soa_context
*bld
,
1401 const struct tgsi_full_instruction
*inst
,
1402 LLVMValueRef
*sizes_out
)
1404 LLVMValueRef explicit_lod
;
1405 unsigned num_coords
, has_lod
;
1408 switch (inst
->Texture
.Texture
) {
1409 case TGSI_TEXTURE_1D
:
1410 case TGSI_TEXTURE_SHADOW1D
:
1414 case TGSI_TEXTURE_2D
:
1415 case TGSI_TEXTURE_SHADOW2D
:
1416 case TGSI_TEXTURE_CUBE
:
1417 case TGSI_TEXTURE_SHADOWCUBE
:
1418 case TGSI_TEXTURE_1D_ARRAY
:
1419 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1423 case TGSI_TEXTURE_3D
:
1424 // case TGSI_TEXTURE_CUBE_ARRAY:
1425 // case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
1426 case TGSI_TEXTURE_2D_ARRAY
:
1427 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1432 case TGSI_TEXTURE_BUFFER
:
1437 case TGSI_TEXTURE_RECT
:
1438 case TGSI_TEXTURE_SHADOWRECT
:
1439 // case TGSI_TEXTURE_2D_MS:
1444 // case TGSI_TEXTURE_2D_MS_ARRAY:
1454 if (!bld
->sampler
) {
1455 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
1456 for (i
= 0; i
< num_coords
; i
++)
1457 sizes_out
[i
] = bld
->bld_base
.base
.undef
;
1462 explicit_lod
= lp_build_emit_fetch( &bld
->bld_base
, inst
, 0, 2 );
1464 explicit_lod
= NULL
;
1466 bld
->sampler
->emit_size_query(bld
->sampler
,
1467 bld
->bld_base
.base
.gallivm
,
1468 bld
->bld_base
.int_bld
.type
,
1469 inst
->Src
[1].Register
.Index
,
1475 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
1480 for (i
= 0; i
< 5; i
++) {
1483 if (pc
+ i
>= bld
->bld_base
.info
->num_instructions
)
1486 opcode
= bld
->bld_base
.instructions
[pc
+ i
].Instruction
.Opcode
;
1488 if (opcode
== TGSI_OPCODE_END
)
1491 if (opcode
== TGSI_OPCODE_TEX
||
1492 opcode
== TGSI_OPCODE_TXP
||
1493 opcode
== TGSI_OPCODE_TXD
||
1494 opcode
== TGSI_OPCODE_TXB
||
1495 opcode
== TGSI_OPCODE_TXL
||
1496 opcode
== TGSI_OPCODE_TXF
||
1497 opcode
== TGSI_OPCODE_TXQ
||
1498 opcode
== TGSI_OPCODE_CAL
||
1499 opcode
== TGSI_OPCODE_CALLNZ
||
1500 opcode
== TGSI_OPCODE_IF
||
1501 opcode
== TGSI_OPCODE_IFC
||
1502 opcode
== TGSI_OPCODE_BGNLOOP
||
1503 opcode
== TGSI_OPCODE_SWITCH
)
1513 * Kill fragment if any of the src register values are negative.
1517 struct lp_build_tgsi_soa_context
*bld
,
1518 const struct tgsi_full_instruction
*inst
,
1521 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1522 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
1523 LLVMValueRef terms
[TGSI_NUM_CHANNELS
];
1525 unsigned chan_index
;
1527 memset(&terms
, 0, sizeof terms
);
1529 TGSI_FOR_EACH_CHANNEL( chan_index
) {
1532 /* Unswizzle channel */
1533 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
1535 /* Check if the component has not been already tested. */
1536 assert(swizzle
< TGSI_NUM_CHANNELS
);
1537 if( !terms
[swizzle
] )
1538 /* TODO: change the comparison operator instead of setting the sign */
1539 terms
[swizzle
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, chan_index
);
1543 TGSI_FOR_EACH_CHANNEL( chan_index
) {
1544 if(terms
[chan_index
]) {
1545 LLVMValueRef chan_mask
;
1548 * If term < 0 then mask = 0 else mask = ~0.
1550 chan_mask
= lp_build_cmp(&bld
->bld_base
.base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->bld_base
.base
.zero
);
1553 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
1560 lp_build_mask_update(bld
->mask
, mask
);
1562 if (!near_end_of_shader(bld
, pc
))
1563 lp_build_mask_check(bld
->mask
);
1569 * Predicated fragment kill.
1570 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1571 * The only predication is the execution mask which will apply if
1572 * we're inside a loop or conditional.
1575 emit_kilp(struct lp_build_tgsi_soa_context
*bld
,
1578 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1581 /* For those channels which are "alive", disable fragment shader
1584 if (bld
->exec_mask
.has_mask
) {
1585 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
1588 LLVMValueRef zero
= LLVMConstNull(bld
->bld_base
.base
.int_vec_type
);
1592 lp_build_mask_update(bld
->mask
, mask
);
1594 if (!near_end_of_shader(bld
, pc
))
1595 lp_build_mask_check(bld
->mask
);
1600 * Emit code which will dump the value of all the temporary registers
1604 emit_dump_temps(struct lp_build_tgsi_soa_context
*bld
)
1606 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1607 LLVMBuilderRef builder
= gallivm
->builder
;
1608 LLVMValueRef temp_ptr
;
1609 LLVMValueRef i0
= lp_build_const_int32(gallivm
, 0);
1610 LLVMValueRef i1
= lp_build_const_int32(gallivm
, 1);
1611 LLVMValueRef i2
= lp_build_const_int32(gallivm
, 2);
1612 LLVMValueRef i3
= lp_build_const_int32(gallivm
, 3);
1614 int n
= bld
->bld_base
.info
->file_max
[TGSI_FILE_TEMPORARY
];
1616 for (index
= 0; index
< n
; index
++) {
1617 LLVMValueRef idx
= lp_build_const_int32(gallivm
, index
);
1618 LLVMValueRef v
[4][4], res
;
1621 lp_build_printf(gallivm
, "TEMP[%d]:\n", idx
);
1623 for (chan
= 0; chan
< 4; chan
++) {
1624 temp_ptr
= lp_get_temp_ptr_soa(bld
, index
, chan
);
1625 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1626 v
[chan
][0] = LLVMBuildExtractElement(builder
, res
, i0
, "");
1627 v
[chan
][1] = LLVMBuildExtractElement(builder
, res
, i1
, "");
1628 v
[chan
][2] = LLVMBuildExtractElement(builder
, res
, i2
, "");
1629 v
[chan
][3] = LLVMBuildExtractElement(builder
, res
, i3
, "");
1632 lp_build_printf(gallivm
, " X: %f %f %f %f\n",
1633 v
[0][0], v
[0][1], v
[0][2], v
[0][3]);
1634 lp_build_printf(gallivm
, " Y: %f %f %f %f\n",
1635 v
[1][0], v
[1][1], v
[1][2], v
[1][3]);
1636 lp_build_printf(gallivm
, " Z: %f %f %f %f\n",
1637 v
[2][0], v
[2][1], v
[2][2], v
[2][3]);
1638 lp_build_printf(gallivm
, " W: %f %f %f %f\n",
1639 v
[3][0], v
[3][1], v
[3][2], v
[3][3]);
1646 lp_emit_declaration_soa(
1647 struct lp_build_tgsi_context
*bld_base
,
1648 const struct tgsi_full_declaration
*decl
)
1650 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
1651 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1652 LLVMTypeRef vec_type
= bld
->bld_base
.base
.vec_type
;
1653 const unsigned first
= decl
->Range
.First
;
1654 const unsigned last
= decl
->Range
.Last
;
1657 for (idx
= first
; idx
<= last
; ++idx
) {
1658 assert(last
<= bld
->bld_base
.info
->file_max
[decl
->Declaration
.File
]);
1659 switch (decl
->Declaration
.File
) {
1660 case TGSI_FILE_TEMPORARY
:
1661 assert(idx
< LP_MAX_TGSI_TEMPS
);
1662 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
1663 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
1664 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
1668 case TGSI_FILE_OUTPUT
:
1669 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
1670 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
1671 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
1672 vec_type
, "output");
1676 case TGSI_FILE_ADDRESS
:
1677 /* ADDR registers are the only allocated with an integer LLVM IR type,
1678 * as they are guaranteed to always have integers.
1679 * XXX: Not sure if this exception is worthwhile (or the whole idea of
1680 * an ADDR register for that matter).
1682 assert(idx
< LP_MAX_TGSI_ADDRS
);
1683 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
1684 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, bld_base
->base
.int_vec_type
, "addr");
1687 case TGSI_FILE_PREDICATE
:
1688 assert(idx
< LP_MAX_TGSI_PREDS
);
1689 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
1690 bld
->preds
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
,
1695 /* don't need to declare other vars */
1702 void lp_emit_immediate_soa(
1703 struct lp_build_tgsi_context
*bld_base
,
1704 const struct tgsi_full_immediate
*imm
)
1706 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
1707 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
1709 /* simply copy the immediate values into the next immediates[] slot */
1711 const uint size
= imm
->Immediate
.NrTokens
- 1;
1713 assert(bld
->num_immediates
< LP_MAX_TGSI_IMMEDIATES
);
1714 switch (imm
->Immediate
.DataType
) {
1715 case TGSI_IMM_FLOAT32
:
1716 for( i
= 0; i
< size
; ++i
)
1717 bld
->immediates
[bld
->num_immediates
][i
] =
1718 lp_build_const_vec(gallivm
, bld_base
->base
.type
, imm
->u
[i
].Float
);
1721 case TGSI_IMM_UINT32
:
1722 for( i
= 0; i
< size
; ++i
) {
1723 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->uint_bld
.type
, imm
->u
[i
].Uint
);
1724 bld
->immediates
[bld
->num_immediates
][i
] =
1725 LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
1729 case TGSI_IMM_INT32
:
1730 for( i
= 0; i
< size
; ++i
) {
1731 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->int_bld
.type
, imm
->u
[i
].Int
);
1732 bld
->immediates
[bld
->num_immediates
][i
] =
1733 LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
1738 for( i
= size
; i
< 4; ++i
)
1739 bld
->immediates
[bld
->num_immediates
][i
] = bld_base
->base
.undef
;
1741 bld
->num_immediates
++;
1746 const struct lp_build_tgsi_action
* action
,
1747 struct lp_build_tgsi_context
* bld_base
,
1748 struct lp_build_emit_data
* emit_data
)
1750 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1752 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
,
1753 &emit_data
->output
[emit_data
->chan
], NULL
);
1758 const struct lp_build_tgsi_action
* action
,
1759 struct lp_build_tgsi_context
* bld_base
,
1760 struct lp_build_emit_data
* emit_data
)
1762 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1764 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
, NULL
,
1765 &emit_data
->output
[emit_data
->chan
]);
1770 const struct lp_build_tgsi_action
* action
,
1771 struct lp_build_tgsi_context
* bld_base
,
1772 struct lp_build_emit_data
* emit_data
)
1774 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1776 emit_kilp(bld
, bld_base
->pc
- 1);
1781 const struct lp_build_tgsi_action
* action
,
1782 struct lp_build_tgsi_context
* bld_base
,
1783 struct lp_build_emit_data
* emit_data
)
1785 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1787 emit_kil(bld
, emit_data
->inst
, bld_base
->pc
- 1);
1792 const struct lp_build_tgsi_action
* action
,
1793 struct lp_build_tgsi_context
* bld_base
,
1794 struct lp_build_emit_data
* emit_data
)
1796 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1798 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
, emit_data
->output
);
1803 const struct lp_build_tgsi_action
* action
,
1804 struct lp_build_tgsi_context
* bld_base
,
1805 struct lp_build_emit_data
* emit_data
)
1807 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1809 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
1815 const struct lp_build_tgsi_action
* action
,
1816 struct lp_build_tgsi_context
* bld_base
,
1817 struct lp_build_emit_data
* emit_data
)
1819 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1821 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
1827 const struct lp_build_tgsi_action
* action
,
1828 struct lp_build_tgsi_context
* bld_base
,
1829 struct lp_build_emit_data
* emit_data
)
1831 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1833 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
1839 const struct lp_build_tgsi_action
* action
,
1840 struct lp_build_tgsi_context
* bld_base
,
1841 struct lp_build_emit_data
* emit_data
)
1843 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1845 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_PROJECTED
,
1851 const struct lp_build_tgsi_action
* action
,
1852 struct lp_build_tgsi_context
* bld_base
,
1853 struct lp_build_emit_data
* emit_data
)
1855 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1857 emit_txq(bld
, emit_data
->inst
, emit_data
->output
);
1862 const struct lp_build_tgsi_action
* action
,
1863 struct lp_build_tgsi_context
* bld_base
,
1864 struct lp_build_emit_data
* emit_data
)
1866 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1868 emit_txf(bld
, emit_data
->inst
, emit_data
->output
);
1873 const struct lp_build_tgsi_action
* action
,
1874 struct lp_build_tgsi_context
* bld_base
,
1875 struct lp_build_emit_data
* emit_data
)
1877 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1879 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
1885 const struct lp_build_tgsi_action
* action
,
1886 struct lp_build_tgsi_context
* bld_base
,
1887 struct lp_build_emit_data
* emit_data
)
1889 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1891 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
1896 const struct lp_build_tgsi_action
* action
,
1897 struct lp_build_tgsi_context
* bld_base
,
1898 struct lp_build_emit_data
* emit_data
)
1900 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1902 lp_exec_break(&bld
->exec_mask
);
1907 const struct lp_build_tgsi_action
* action
,
1908 struct lp_build_tgsi_context
* bld_base
,
1909 struct lp_build_emit_data
* emit_data
)
1912 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1914 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
1915 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
1916 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
1921 const struct lp_build_tgsi_action
* action
,
1922 struct lp_build_tgsi_context
* bld_base
,
1923 struct lp_build_emit_data
* emit_data
)
1925 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1927 lp_exec_bgnloop(&bld
->exec_mask
);
1932 const struct lp_build_tgsi_action
* action
,
1933 struct lp_build_tgsi_context
* bld_base
,
1934 struct lp_build_emit_data
* emit_data
)
1936 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1938 lp_exec_mask_bgnsub(&bld
->exec_mask
);
1943 const struct lp_build_tgsi_action
* action
,
1944 struct lp_build_tgsi_context
* bld_base
,
1945 struct lp_build_emit_data
* emit_data
)
1947 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1949 lp_exec_mask_cond_invert(&bld
->exec_mask
);
1954 const struct lp_build_tgsi_action
* action
,
1955 struct lp_build_tgsi_context
* bld_base
,
1956 struct lp_build_emit_data
* emit_data
)
1958 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1960 lp_exec_mask_cond_pop(&bld
->exec_mask
);
1965 const struct lp_build_tgsi_action
* action
,
1966 struct lp_build_tgsi_context
* bld_base
,
1967 struct lp_build_emit_data
* emit_data
)
1969 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1971 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
1976 const struct lp_build_tgsi_action
* action
,
1977 struct lp_build_tgsi_context
* bld_base
,
1978 struct lp_build_emit_data
* emit_data
)
1980 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1982 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
1987 const struct lp_build_tgsi_action
* action
,
1988 struct lp_build_tgsi_context
* bld_base
,
1989 struct lp_build_emit_data
* emit_data
)
1991 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1993 lp_exec_continue(&bld
->exec_mask
);
1996 /* XXX: Refactor and move it to lp_bld_tgsi_action.c
1998 * XXX: What do the comments about xmm registers mean? Maybe they are left over
1999 * from old code, but there is no guarantee that LLVM will use those registers
2002 * XXX: There should be no calls to lp_build_emit_fetch in this function. This
2003 * should be handled by the emit_data->fetch_args function. */
2006 const struct lp_build_tgsi_action
* action
,
2007 struct lp_build_tgsi_context
* bld_base
,
2008 struct lp_build_emit_data
* emit_data
)
2010 LLVMValueRef tmp0
, tmp1
;
2011 LLVMValueRef tmp4
= NULL
;
2012 LLVMValueRef tmp5
= NULL
;
2013 LLVMValueRef tmp6
= NULL
;
2014 LLVMValueRef tmp7
= NULL
;
2015 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2017 uint dims
= (emit_data
->inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
2019 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
) ||
2020 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
) ||
2021 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
) ||
2022 (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
) && dims
== 4)) {
2024 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
2027 /* xmm0 = src.x * src.x */
2028 tmp0
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_X
);
2029 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
)) {
2032 tmp0
= lp_build_mul( &bld
->bld_base
.base
, tmp0
, tmp0
);
2035 /* xmm0 = xmm0 + src.y * src.y */
2036 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_Y
);
2037 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
)) {
2040 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
2041 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
2044 /* xmm0 = xmm0 + src.z * src.z */
2045 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_Z
);
2046 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
)) {
2049 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
2050 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
2054 /* xmm0 = xmm0 + src.w * src.w */
2055 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_W
);
2056 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
)) {
2059 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
2060 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
2062 /* xmm1 = 1 / sqrt(xmm0) */
2063 tmp1
= lp_build_rsqrt( &bld
->bld_base
.base
, tmp0
);
2064 /* dst.x = xmm1 * src.x */
2065 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
)) {
2066 emit_data
->output
[TGSI_CHAN_X
] = lp_build_mul( &bld
->bld_base
.base
, tmp4
, tmp1
);
2068 /* dst.y = xmm1 * src.y */
2069 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
)) {
2070 emit_data
->output
[TGSI_CHAN_Y
] = lp_build_mul( &bld
->bld_base
.base
, tmp5
, tmp1
);
2073 /* dst.z = xmm1 * src.z */
2074 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
)) {
2075 emit_data
->output
[TGSI_CHAN_Z
] = lp_build_mul( &bld
->bld_base
.base
, tmp6
, tmp1
);
2077 /* dst.w = xmm1 * src.w */
2078 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
) && dims
== 4) {
2079 emit_data
->output
[TGSI_CHAN_W
] = lp_build_mul( &bld
->bld_base
.base
, tmp7
, tmp1
);
2084 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
) && dims
== 3) {
2085 emit_data
->output
[TGSI_CHAN_W
] = bld
->bld_base
.base
.one
;
2089 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
2091 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2092 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
2094 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
2095 LLVMValueRef array_size
=
2096 lp_build_const_int32(gallivm
,
2097 bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4);
2098 bld
->temps_array
= lp_build_array_alloca(gallivm
,
2099 bld_base
->base
.vec_type
, array_size
,
2103 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
2104 LLVMValueRef array_size
=
2105 lp_build_const_int32(gallivm
,
2106 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
2107 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
2108 bld_base
->base
.vec_type
, array_size
,
2112 /* If we have indirect addressing in inputs we need to copy them into
2113 * our alloca array to be able to iterate over them */
2114 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
2115 unsigned index
, chan
;
2116 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
2117 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
2118 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
2119 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
2120 vec_type
, array_size
,
2123 assert(bld_base
->info
->num_inputs
2124 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
2126 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
2127 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
2128 LLVMValueRef lindex
=
2129 lp_build_const_int32(gallivm
, index
* 4 + chan
);
2130 LLVMValueRef input_ptr
=
2131 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
2133 LLVMValueRef value
= bld
->inputs
[index
][chan
];
2135 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
2141 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
2143 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2147 emit_dump_temps(bld
);
2150 /* If we have indirect addressing in outputs we need to copy our alloca array
2151 * to the output slots specified by the caller */
2152 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
2153 unsigned index
, chan
;
2154 assert(bld_base
->info
->num_outputs
<=
2155 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
2156 for (index
= 0; index
< bld_base
->info
->num_outputs
; ++index
) {
2157 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
2158 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
2165 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
2166 const struct tgsi_token
*tokens
,
2167 struct lp_type type
,
2168 struct lp_build_mask_context
*mask
,
2169 LLVMValueRef consts_ptr
,
2170 const struct lp_bld_tgsi_system_values
*system_values
,
2171 const LLVMValueRef
*pos
,
2172 const LLVMValueRef (*inputs
)[TGSI_NUM_CHANNELS
],
2173 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
],
2174 struct lp_build_sampler_soa
*sampler
,
2175 const struct tgsi_shader_info
*info
)
2177 struct lp_build_tgsi_soa_context bld
;
2179 struct lp_type res_type
;
2181 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
2182 memset(&res_type
, 0, sizeof res_type
);
2183 res_type
.width
= type
.width
;
2184 res_type
.length
= type
.length
;
2187 /* Setup build context */
2188 memset(&bld
, 0, sizeof bld
);
2189 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
2190 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
2191 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
2192 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
2195 bld
.inputs
= inputs
;
2196 bld
.outputs
= outputs
;
2197 bld
.consts_ptr
= consts_ptr
;
2198 bld
.sampler
= sampler
;
2199 bld
.bld_base
.info
= info
;
2200 bld
.indirect_files
= info
->indirect_files
;
2202 bld
.bld_base
.soa
= TRUE
;
2203 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
2204 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
2205 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
2206 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
2207 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
2208 bld
.bld_base
.emit_store
= emit_store
;
2210 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
2211 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
2213 bld
.bld_base
.emit_prologue
= emit_prologue
;
2214 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
2216 /* Set opcode actions */
2217 lp_set_default_actions_cpu(&bld
.bld_base
);
2219 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
2220 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
2221 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
2222 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
2223 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
2224 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
2225 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
2226 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
2227 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
2228 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
2229 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
2230 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
2231 bld
.bld_base
.op_actions
[TGSI_OPCODE_KIL
].emit
= kil_emit
;
2232 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILP
].emit
= kilp_emit
;
2233 bld
.bld_base
.op_actions
[TGSI_OPCODE_NRM
].emit
= nrm_emit
;
2234 bld
.bld_base
.op_actions
[TGSI_OPCODE_NRM4
].emit
= nrm_emit
;
2235 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
2236 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
2237 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
2238 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
2239 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
2240 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
2241 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
2242 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
2244 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.base
);
2246 bld
.system_values
= *system_values
;
2248 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
2251 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
2252 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
2253 debug_printf("11111111111111111111111111111 \n");
2254 tgsi_dump(tokens
, 0);
2255 lp_debug_dump_value(function
);
2256 debug_printf("2222222222222222222222222222 \n");
2260 LLVMModuleRef module
= LLVMGetGlobalParent(
2261 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
2262 LLVMDumpModule(module
);