1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "tgsi/tgsi_strings.h"
51 #include "lp_bld_tgsi_action.h"
52 #include "lp_bld_type.h"
53 #include "lp_bld_const.h"
54 #include "lp_bld_arit.h"
55 #include "lp_bld_bitarit.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_init.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_swizzle.h"
60 #include "lp_bld_flow.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_tgsi.h"
63 #include "lp_bld_limits.h"
64 #include "lp_bld_debug.h"
65 #include "lp_bld_printf.h"
66 #include "lp_bld_sample.h"
67 #include "lp_bld_struct.h"
69 /* SM 4.0 says that subroutines can nest 32 deep and
70 * we need one more for our main function */
71 #define LP_MAX_NUM_FUNCS 33
73 #define DUMP_GS_EMITS 0
76 * If non-zero, the generated LLVM IR will print intermediate results on every TGSI
80 * - take execution masks in consideration
81 * - debug control-flow instructions
83 #define DEBUG_EXECUTION 0
87 * Emit code to print a register value.
90 emit_dump_reg(struct gallivm_state
*gallivm
,
98 util_snprintf(buf
, sizeof buf
, " %s[%u].%c = ",
100 index
, "xyzw"[chan
]);
102 lp_build_print_value(gallivm
, buf
, value
);
106 * Return the context for the current function.
107 * (always 'main', if shader doesn't do any function calls)
109 static INLINE
struct function_ctx
*
110 func_ctx(struct lp_exec_mask
*mask
)
112 assert(mask
->function_stack_size
> 0);
113 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
114 return &mask
->function_stack
[mask
->function_stack_size
- 1];
118 * Returns true if we're in a loop.
119 * It's global, meaning that it returns true even if there's
120 * no loop inside the current function, but we were inside
121 * a loop inside another function, from which this one was called.
123 static INLINE boolean
124 mask_has_loop(struct lp_exec_mask
*mask
)
127 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
128 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
129 if (ctx
->loop_stack_size
> 0)
136 * Returns true if we're inside a switch statement.
137 * It's global, meaning that it returns true even if there's
138 * no switch in the current function, but we were inside
139 * a switch inside another function, from which this one was called.
141 static INLINE boolean
142 mask_has_switch(struct lp_exec_mask
*mask
)
145 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
146 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
147 if (ctx
->switch_stack_size
> 0)
154 * Returns true if we're inside a conditional.
155 * It's global, meaning that it returns true even if there's
156 * no conditional in the current function, but we were inside
157 * a conditional inside another function, from which this one was called.
159 static INLINE boolean
160 mask_has_cond(struct lp_exec_mask
*mask
)
163 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
164 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
165 if (ctx
->cond_stack_size
> 0)
173 * Initialize a function context at the specified index.
176 lp_exec_mask_function_init(struct lp_exec_mask
*mask
, int function_idx
)
178 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
179 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
180 struct function_ctx
*ctx
= &mask
->function_stack
[function_idx
];
182 ctx
->cond_stack_size
= 0;
183 ctx
->loop_stack_size
= 0;
184 ctx
->switch_stack_size
= 0;
186 if (function_idx
== 0) {
187 ctx
->ret_mask
= mask
->ret_mask
;
190 ctx
->loop_limiter
= lp_build_alloca(mask
->bld
->gallivm
,
191 int_type
, "looplimiter");
194 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
198 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
201 mask
->has_mask
= FALSE
;
202 mask
->ret_in_main
= FALSE
;
203 /* For the main function */
204 mask
->function_stack_size
= 1;
206 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
207 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
=
208 mask
->cond_mask
= mask
->switch_mask
=
209 LLVMConstAllOnes(mask
->int_vec_type
);
211 mask
->function_stack
= CALLOC(LP_MAX_NUM_FUNCS
,
212 sizeof(mask
->function_stack
[0]));
213 lp_exec_mask_function_init(mask
, 0);
217 lp_exec_mask_fini(struct lp_exec_mask
*mask
)
219 FREE(mask
->function_stack
);
222 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
224 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
225 boolean has_loop_mask
= mask_has_loop(mask
);
226 boolean has_cond_mask
= mask_has_cond(mask
);
227 boolean has_switch_mask
= mask_has_switch(mask
);
228 boolean has_ret_mask
= mask
->function_stack_size
> 1 ||
232 /*for loops we need to update the entire mask at runtime */
234 assert(mask
->break_mask
);
235 tmp
= LLVMBuildAnd(builder
,
239 mask
->exec_mask
= LLVMBuildAnd(builder
,
244 mask
->exec_mask
= mask
->cond_mask
;
246 if (has_switch_mask
) {
247 mask
->exec_mask
= LLVMBuildAnd(builder
,
254 mask
->exec_mask
= LLVMBuildAnd(builder
,
260 mask
->has_mask
= (has_cond_mask
||
266 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
269 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
270 struct function_ctx
*ctx
= func_ctx(mask
);
272 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
) {
273 ctx
->cond_stack_size
++;
276 if (ctx
->cond_stack_size
== 0 && mask
->function_stack_size
== 1) {
277 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
279 ctx
->cond_stack
[ctx
->cond_stack_size
++] = mask
->cond_mask
;
280 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
281 mask
->cond_mask
= LLVMBuildAnd(builder
,
285 lp_exec_mask_update(mask
);
288 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
290 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
291 struct function_ctx
*ctx
= func_ctx(mask
);
292 LLVMValueRef prev_mask
;
293 LLVMValueRef inv_mask
;
295 assert(ctx
->cond_stack_size
);
296 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
298 prev_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
- 1];
299 if (ctx
->cond_stack_size
== 1 && mask
->function_stack_size
== 1) {
300 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
303 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
305 mask
->cond_mask
= LLVMBuildAnd(builder
,
308 lp_exec_mask_update(mask
);
311 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
313 struct function_ctx
*ctx
= func_ctx(mask
);
314 assert(ctx
->cond_stack_size
);
315 --ctx
->cond_stack_size
;
316 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
318 mask
->cond_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
];
319 lp_exec_mask_update(mask
);
322 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
324 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
325 struct function_ctx
*ctx
= func_ctx(mask
);
327 if (ctx
->loop_stack_size
>= LP_MAX_TGSI_NESTING
) {
328 ++ctx
->loop_stack_size
;
332 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
334 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_LOOP
;
336 ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
= ctx
->loop_block
;
337 ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
338 ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
= mask
->break_mask
;
339 ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
= ctx
->break_var
;
340 ++ctx
->loop_stack_size
;
342 ctx
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
343 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
345 ctx
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
347 LLVMBuildBr(builder
, ctx
->loop_block
);
348 LLVMPositionBuilderAtEnd(builder
, ctx
->loop_block
);
350 mask
->break_mask
= LLVMBuildLoad(builder
, ctx
->break_var
, "");
352 lp_exec_mask_update(mask
);
355 static void lp_exec_break(struct lp_exec_mask
*mask
,
356 struct lp_build_tgsi_context
* bld_base
)
358 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
359 struct function_ctx
*ctx
= func_ctx(mask
);
361 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
362 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
366 mask
->break_mask
= LLVMBuildAnd(builder
,
368 exec_mask
, "break_full");
371 unsigned opcode
= bld_base
->instructions
[bld_base
->pc
+ 1].Instruction
.Opcode
;
372 boolean break_always
= (opcode
== TGSI_OPCODE_ENDSWITCH
||
373 opcode
== TGSI_OPCODE_CASE
);
376 if (ctx
->switch_in_default
) {
378 * stop default execution but only if this is an unconditional switch.
379 * (The condition here is not perfect since dead code after break is
380 * allowed but should be sufficient since false negatives are just
381 * unoptimized - so we don't have to pre-evaluate that).
383 if(break_always
&& ctx
->switch_pc
) {
384 bld_base
->pc
= ctx
->switch_pc
;
390 mask
->switch_mask
= LLVMConstNull(mask
->bld
->int_vec_type
);
393 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
396 mask
->switch_mask
= LLVMBuildAnd(builder
,
398 exec_mask
, "break_switch");
402 lp_exec_mask_update(mask
);
405 static void lp_exec_break_condition(struct lp_exec_mask
*mask
,
408 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
409 struct function_ctx
*ctx
= func_ctx(mask
);
410 LLVMValueRef cond_mask
= LLVMBuildAnd(builder
,
413 cond_mask
= LLVMBuildNot(builder
, cond_mask
, "break_cond");
415 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
416 mask
->break_mask
= LLVMBuildAnd(builder
,
418 cond_mask
, "breakc_full");
421 mask
->switch_mask
= LLVMBuildAnd(builder
,
423 cond_mask
, "breakc_switch");
426 lp_exec_mask_update(mask
);
429 static void lp_exec_continue(struct lp_exec_mask
*mask
)
431 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
432 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
436 mask
->cont_mask
= LLVMBuildAnd(builder
,
440 lp_exec_mask_update(mask
);
444 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
445 struct lp_exec_mask
*mask
)
447 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
448 struct function_ctx
*ctx
= func_ctx(mask
);
449 LLVMBasicBlockRef endloop
;
450 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
451 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
452 mask
->bld
->type
.width
*
453 mask
->bld
->type
.length
);
454 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
456 assert(mask
->break_mask
);
459 assert(ctx
->loop_stack_size
);
460 if (ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
461 --ctx
->loop_stack_size
;
466 * Restore the cont_mask, but don't pop
468 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
- 1].cont_mask
;
469 lp_exec_mask_update(mask
);
472 * Unlike the continue mask, the break_mask must be preserved across loop
475 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
477 /* Decrement the loop limiter */
478 limiter
= LLVMBuildLoad(builder
, ctx
->loop_limiter
, "");
480 limiter
= LLVMBuildSub(
483 LLVMConstInt(int_type
, 1, false),
486 LLVMBuildStore(builder
, limiter
, ctx
->loop_limiter
);
488 /* i1cond = (mask != 0) */
489 i1cond
= LLVMBuildICmp(
492 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
493 LLVMConstNull(reg_type
), "i1cond");
495 /* i2cond = (looplimiter > 0) */
496 i2cond
= LLVMBuildICmp(
500 LLVMConstNull(int_type
), "i2cond");
502 /* if( i1cond && i2cond ) */
503 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
505 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
507 LLVMBuildCondBr(builder
,
508 icond
, ctx
->loop_block
, endloop
);
510 LLVMPositionBuilderAtEnd(builder
, endloop
);
512 assert(ctx
->loop_stack_size
);
513 --ctx
->loop_stack_size
;
514 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
;
515 mask
->break_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
;
516 ctx
->loop_block
= ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
;
517 ctx
->break_var
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
;
518 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+
519 ctx
->switch_stack_size
];
521 lp_exec_mask_update(mask
);
524 static void lp_exec_switch(struct lp_exec_mask
*mask
,
525 LLVMValueRef switchval
)
527 struct function_ctx
*ctx
= func_ctx(mask
);
529 if (ctx
->switch_stack_size
>= LP_MAX_TGSI_NESTING
||
530 ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
531 ctx
->switch_stack_size
++;
535 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
537 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_SWITCH
;
539 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
= mask
->switch_mask
;
540 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
= ctx
->switch_val
;
541 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
= ctx
->switch_mask_default
;
542 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
= ctx
->switch_in_default
;
543 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
= ctx
->switch_pc
;
544 ctx
->switch_stack_size
++;
546 mask
->switch_mask
= LLVMConstNull(mask
->int_vec_type
);
547 ctx
->switch_val
= switchval
;
548 ctx
->switch_mask_default
= LLVMConstNull(mask
->int_vec_type
);
549 ctx
->switch_in_default
= false;
552 lp_exec_mask_update(mask
);
555 static void lp_exec_endswitch(struct lp_exec_mask
*mask
,
556 struct lp_build_tgsi_context
* bld_base
)
558 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
559 struct function_ctx
*ctx
= func_ctx(mask
);
561 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
562 ctx
->switch_stack_size
--;
566 /* check if there's deferred default if so do it now */
567 if (ctx
->switch_pc
&& !ctx
->switch_in_default
) {
568 LLVMValueRef prevmask
, defaultmask
;
570 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
571 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
572 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
573 ctx
->switch_in_default
= true;
575 lp_exec_mask_update(mask
);
577 assert(bld_base
->instructions
[ctx
->switch_pc
- 1].Instruction
.Opcode
==
578 TGSI_OPCODE_DEFAULT
);
580 tmp_pc
= bld_base
->pc
;
581 bld_base
->pc
= ctx
->switch_pc
;
583 * re-purpose switch_pc to point to here again, since we stop execution of
584 * the deferred default after next break.
586 ctx
->switch_pc
= tmp_pc
- 1;
591 else if (ctx
->switch_pc
&& ctx
->switch_in_default
) {
592 assert(bld_base
->pc
== ctx
->switch_pc
+ 1);
595 ctx
->switch_stack_size
--;
596 mask
->switch_mask
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
;
597 ctx
->switch_val
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
;
598 ctx
->switch_mask_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
;
599 ctx
->switch_in_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
;
600 ctx
->switch_pc
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
;
602 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
];
604 lp_exec_mask_update(mask
);
607 static void lp_exec_case(struct lp_exec_mask
*mask
,
608 LLVMValueRef caseval
)
610 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
611 struct function_ctx
*ctx
= func_ctx(mask
);
613 LLVMValueRef casemask
, prevmask
;
615 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
619 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
620 if (!ctx
->switch_in_default
) {
621 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
622 casemask
= lp_build_cmp(mask
->bld
, PIPE_FUNC_EQUAL
, caseval
, ctx
->switch_val
);
623 ctx
->switch_mask_default
= LLVMBuildOr(builder
, casemask
,
624 ctx
->switch_mask_default
, "sw_default_mask");
625 casemask
= LLVMBuildOr(builder
, casemask
, mask
->switch_mask
, "");
626 mask
->switch_mask
= LLVMBuildAnd(builder
, casemask
, prevmask
, "sw_mask");
628 lp_exec_mask_update(mask
);
633 * Analyse default statement in a switch.
634 * \return true if default is last statement, false otherwise
635 * \param default_pc_start contains pc of instruction to jump to
636 * if default wasn't last but there's no
637 * fallthrough into default.
639 static boolean
default_analyse_is_last(struct lp_exec_mask
*mask
,
640 struct lp_build_tgsi_context
* bld_base
,
641 int *default_pc_start
)
643 unsigned pc
= bld_base
->pc
;
644 struct function_ctx
*ctx
= func_ctx(mask
);
645 unsigned curr_switch_stack
= ctx
->switch_stack_size
;
647 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
651 /* skip over case statements which are together with default */
652 while (bld_base
->instructions
[pc
].Instruction
.Opcode
== TGSI_OPCODE_CASE
) {
656 while (pc
!= -1 && pc
< bld_base
->num_instructions
) {
657 unsigned opcode
= bld_base
->instructions
[pc
].Instruction
.Opcode
;
659 case TGSI_OPCODE_CASE
:
660 if (curr_switch_stack
== ctx
->switch_stack_size
) {
661 *default_pc_start
= pc
- 1;
665 case TGSI_OPCODE_SWITCH
:
668 case TGSI_OPCODE_ENDSWITCH
:
669 if (curr_switch_stack
== ctx
->switch_stack_size
) {
670 *default_pc_start
= pc
- 1;
678 /* should never arrive here */
683 static void lp_exec_default(struct lp_exec_mask
*mask
,
684 struct lp_build_tgsi_context
* bld_base
)
686 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
687 struct function_ctx
*ctx
= func_ctx(mask
);
690 boolean default_is_last
;
692 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
697 * This is a messy opcode, because it may not be always at the end and
698 * there can be fallthrough in and out of it.
701 default_is_last
= default_analyse_is_last(mask
, bld_base
, &default_exec_pc
);
703 * If it is last statement in switch (note that case statements appearing
704 * "at the same time" as default don't change that) everything is just fine,
705 * update switch mask and go on. This means we can handle default with
706 * fallthrough INTO it without overhead, if it is last.
708 if (default_is_last
) {
709 LLVMValueRef prevmask
, defaultmask
;
710 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
711 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
712 defaultmask
= LLVMBuildOr(builder
, defaultmask
, mask
->switch_mask
, "");
713 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
714 ctx
->switch_in_default
= true;
716 lp_exec_mask_update(mask
);
720 * Technically, "case" immediately before default isn't really a
721 * fallthrough, however we still have to count them as such as we
722 * already have updated the masks.
723 * If that happens in practice could add a switch optimizer pass
724 * which just gets rid of all case statements appearing together with
725 * default (or could do switch analysis at switch start time instead).
727 unsigned opcode
= bld_base
->instructions
[bld_base
->pc
- 1].Instruction
.Opcode
;
728 boolean ft_into
= (opcode
!= TGSI_OPCODE_BRK
&&
729 opcode
!= TGSI_OPCODE_SWITCH
);
731 * If it is not last statement and there was no fallthrough into it,
732 * we record the PC and continue execution at next case (again, those
733 * case encountered at the same time don't count). At endswitch
734 * time, we update switchmask, and go back executing the code we skipped
735 * until the next break (possibly re-executing some code with changed mask
736 * if there was a fallthrough out of default).
737 * Finally, if it is not last statement and there was a fallthrough into it,
738 * do the same as with the former case, except instead of skipping the code
739 * just execute it without updating the mask, then go back and re-execute.
741 ctx
->switch_pc
= bld_base
->pc
;
743 bld_base
->pc
= default_exec_pc
;
749 /* stores val into an address pointed to by dst_ptr.
750 * mask->exec_mask is used to figure out which bits of val
751 * should be stored into the address
752 * (0 means don't store this bit, 1 means do store).
754 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
755 struct lp_build_context
*bld_store
,
758 LLVMValueRef dst_ptr
)
760 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
762 assert(lp_check_value(bld_store
->type
, val
));
763 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr
)) == LLVMPointerTypeKind
);
764 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr
)) == LLVMTypeOf(val
));
766 /* Mix the predicate and execution mask */
767 if (mask
->has_mask
) {
769 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
771 pred
= mask
->exec_mask
;
776 LLVMValueRef res
, dst
;
778 dst
= LLVMBuildLoad(builder
, dst_ptr
, "");
779 res
= lp_build_select(bld_store
, pred
, val
, dst
);
780 LLVMBuildStore(builder
, res
, dst_ptr
);
782 LLVMBuildStore(builder
, val
, dst_ptr
);
785 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
789 if (mask
->function_stack_size
>= LP_MAX_NUM_FUNCS
) {
793 lp_exec_mask_function_init(mask
, mask
->function_stack_size
);
794 mask
->function_stack
[mask
->function_stack_size
].pc
= *pc
;
795 mask
->function_stack
[mask
->function_stack_size
].ret_mask
= mask
->ret_mask
;
796 mask
->function_stack_size
++;
800 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
802 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
803 struct function_ctx
*ctx
= func_ctx(mask
);
804 LLVMValueRef exec_mask
;
806 if (ctx
->cond_stack_size
== 0 &&
807 ctx
->loop_stack_size
== 0 &&
808 ctx
->switch_stack_size
== 0 &&
809 mask
->function_stack_size
== 1) {
810 /* returning from main() */
815 if (mask
->function_stack_size
== 1) {
817 * This requires special handling since we need to ensure
818 * we don't drop the mask even if we have no call stack
819 * (e.g. after a ret in a if clause after the endif)
821 mask
->ret_in_main
= TRUE
;
824 exec_mask
= LLVMBuildNot(builder
,
828 mask
->ret_mask
= LLVMBuildAnd(builder
,
830 exec_mask
, "ret_full");
832 lp_exec_mask_update(mask
);
/* BGNSUB needs no mask work: the frame was already pushed by CAL. */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
839 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
841 struct function_ctx
*ctx
;
843 assert(mask
->function_stack_size
> 1);
844 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
846 ctx
= func_ctx(mask
);
847 mask
->function_stack_size
--;
850 mask
->ret_mask
= ctx
->ret_mask
;
852 lp_exec_mask_update(mask
);
857 get_file_ptr(struct lp_build_tgsi_soa_context
*bld
,
862 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
863 LLVMValueRef (*array_of_vars
)[TGSI_NUM_CHANNELS
];
864 LLVMValueRef var_of_array
;
867 case TGSI_FILE_TEMPORARY
:
868 array_of_vars
= bld
->temps
;
869 var_of_array
= bld
->temps_array
;
871 case TGSI_FILE_OUTPUT
:
872 array_of_vars
= bld
->outputs
;
873 var_of_array
= bld
->outputs_array
;
882 if (bld
->indirect_files
& (1 << file
)) {
883 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
884 return LLVMBuildGEP(builder
, var_of_array
, &lindex
, 1, "");
887 assert(index
<= bld
->bld_base
.info
->file_max
[file
]);
888 return array_of_vars
[index
][chan
];
894 * Return pointer to a temporary register channel (src or dest).
895 * Note that indirect addressing cannot be handled here.
896 * \param index which temporary register
897 * \param chan which channel of the temp register.
900 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
904 return get_file_ptr(bld
, TGSI_FILE_TEMPORARY
, index
, chan
);
908 * Return pointer to a output register channel (src or dest).
909 * Note that indirect addressing cannot be handled here.
910 * \param index which output register
911 * \param chan which channel of the output register.
914 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
918 return get_file_ptr(bld
, TGSI_FILE_OUTPUT
, index
, chan
);
922 * If we have indirect addressing in outputs copy our alloca array
923 * to the outputs slots specified by the caller to make sure
924 * our outputs are delivered consistently via the same interface.
927 gather_outputs(struct lp_build_tgsi_soa_context
* bld
)
929 if ((bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
930 unsigned index
, chan
;
931 assert(bld
->bld_base
.info
->num_outputs
<=
932 bld
->bld_base
.info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
933 for (index
= 0; index
< bld
->bld_base
.info
->num_outputs
; ++index
) {
934 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
935 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
943 * XXX the lp_build_gather() function should be capable of doing this
944 * with a little work.
947 build_gather(struct lp_build_context
*bld
,
948 LLVMValueRef base_ptr
,
949 LLVMValueRef indexes
,
950 LLVMValueRef
*overflow_mask
)
952 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
953 LLVMValueRef res
= bld
->undef
;
955 LLVMValueRef temp_ptr
;
958 temp_ptr
= lp_build_alloca(
960 lp_build_vec_type(bld
->gallivm
, bld
->type
), "");
964 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
966 for (i
= 0; i
< bld
->type
.length
; i
++) {
967 LLVMValueRef ii
= lp_build_const_int32(bld
->gallivm
, i
);
968 LLVMValueRef index
= LLVMBuildExtractElement(builder
,
970 LLVMValueRef scalar_ptr
, scalar
;
971 LLVMValueRef overflow
;
972 struct lp_build_if_state if_ctx
;
975 * overflow_mask is a boolean vector telling us which channels
976 * in the vector overflowed. We use the overflow behavior for
977 * constant buffers which is defined as:
978 * Out of bounds access to constant buffer returns 0 in all
979 * componenets. Out of bounds behavior is always with respect
980 * to the size of the buffer bound at that slot.
983 overflow
= LLVMBuildExtractElement(builder
, *overflow_mask
,
985 lp_build_if(&if_ctx
, bld
->gallivm
, overflow
);
987 LLVMValueRef val
= LLVMBuildLoad(builder
, temp_ptr
, "");
988 val
= LLVMBuildInsertElement(
990 LLVMConstNull(LLVMFloatTypeInContext(bld
->gallivm
->context
)),
992 LLVMBuildStore(builder
, val
, temp_ptr
);
994 lp_build_else(&if_ctx
);
996 LLVMValueRef val
= LLVMBuildLoad(builder
, temp_ptr
, "");
998 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
999 &index
, 1, "gather_ptr");
1000 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1002 val
= LLVMBuildInsertElement(builder
, val
, scalar
, ii
, "");
1004 LLVMBuildStore(builder
, val
, temp_ptr
);
1006 lp_build_endif(&if_ctx
);
1008 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
1009 &index
, 1, "gather_ptr");
1010 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1012 res
= LLVMBuildInsertElement(builder
, res
, scalar
, ii
, "");
1016 if (overflow_mask
) {
1017 res
= LLVMBuildLoad(builder
, temp_ptr
, "gather_val");
1025 * Scatter/store vector.
1028 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
1029 LLVMValueRef base_ptr
,
1030 LLVMValueRef indexes
,
1031 LLVMValueRef values
,
1032 struct lp_exec_mask
*mask
,
1035 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1036 LLVMBuilderRef builder
= gallivm
->builder
;
1039 /* Mix the predicate and execution mask */
1040 if (mask
->has_mask
) {
1042 pred
= LLVMBuildAnd(builder
, pred
, mask
->exec_mask
, "");
1045 pred
= mask
->exec_mask
;
1050 * Loop over elements of index_vec, store scalar value.
1052 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1053 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1054 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
1055 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
1056 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
1057 LLVMValueRef scalar_pred
= pred
?
1058 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
1061 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
1062 ii
, val
, index
, scalar_ptr
);
1065 LLVMValueRef real_val
, dst_val
;
1066 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1067 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
1068 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
1071 LLVMBuildStore(builder
, val
, scalar_ptr
);
1078 * Read the current value of the ADDR register, convert the floats to
1079 * ints, add the base index and return the vector of offsets.
1080 * The offsets will be used to index into the constant buffer or
1081 * temporary register file.
1084 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
1085 unsigned reg_file
, unsigned reg_index
,
1086 const struct tgsi_ind_register
*indirect_reg
)
1088 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1089 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
1090 /* always use X component of address register */
1091 unsigned swizzle
= indirect_reg
->Swizzle
;
1094 LLVMValueRef max_index
;
1097 assert(bld
->indirect_files
& (1 << reg_file
));
1099 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
1101 assert(swizzle
< 4);
1102 switch (indirect_reg
->File
) {
1103 case TGSI_FILE_ADDRESS
:
1104 rel
= LLVMBuildLoad(builder
,
1105 bld
->addr
[indirect_reg
->Index
][swizzle
],
1107 /* ADDR LLVM values already have LLVM integer type. */
1109 case TGSI_FILE_TEMPORARY
:
1110 rel
= lp_get_temp_ptr_soa(bld
, indirect_reg
->Index
, swizzle
);
1111 rel
= LLVMBuildLoad(builder
, rel
, "load temp reg");
1112 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1113 * value actually stored is expected to be an integer */
1114 rel
= LLVMBuildBitCast(builder
, rel
, uint_bld
->vec_type
, "");
1118 rel
= uint_bld
->zero
;
1121 index
= lp_build_add(uint_bld
, base
, rel
);
1124 * emit_fetch_constant handles constant buffer overflow so this code
1125 * is pointless for them.
1126 * Furthermore the D3D10 spec in section 6.5 says:
1127 * If the constant buffer bound to a slot is larger than the size
1128 * declared in the shader for that slot, implementations are allowed
1129 * to return incorrect data (not necessarily 0) for indices that are
1130 * larger than the declared size but smaller than the buffer size.
1132 if (reg_file
!= TGSI_FILE_CONSTANT
) {
1133 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
1135 bld
->bld_base
.info
->file_max
[reg_file
]);
1137 assert(!uint_bld
->type
.sign
);
1138 index
= lp_build_min(uint_bld
, index
, max_index
);
1144 static struct lp_build_context
*
1145 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
1146 enum tgsi_opcode_type stype
)
1148 struct lp_build_context
*bld_fetch
;
1151 case TGSI_TYPE_FLOAT
:
1152 case TGSI_TYPE_UNTYPED
:
1153 bld_fetch
= &bld_base
->base
;
1155 case TGSI_TYPE_UNSIGNED
:
1156 bld_fetch
= &bld_base
->uint_bld
;
1158 case TGSI_TYPE_SIGNED
:
1159 bld_fetch
= &bld_base
->int_bld
;
1161 case TGSI_TYPE_VOID
:
1162 case TGSI_TYPE_DOUBLE
:
1172 get_soa_array_offsets(struct lp_build_context
*uint_bld
,
1173 LLVMValueRef indirect_index
,
1174 unsigned chan_index
,
1175 boolean need_perelement_offset
)
1177 struct gallivm_state
*gallivm
= uint_bld
->gallivm
;
1178 LLVMValueRef chan_vec
=
1179 lp_build_const_int_vec(uint_bld
->gallivm
, uint_bld
->type
, chan_index
);
1180 LLVMValueRef length_vec
=
1181 lp_build_const_int_vec(gallivm
, uint_bld
->type
, uint_bld
->type
.length
);
1182 LLVMValueRef index_vec
;
1184 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1185 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1186 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1187 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1189 if (need_perelement_offset
) {
1190 LLVMValueRef pixel_offsets
;
1192 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1193 pixel_offsets
= uint_bld
->undef
;
1194 for (i
= 0; i
< uint_bld
->type
.length
; i
++) {
1195 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1196 pixel_offsets
= LLVMBuildInsertElement(gallivm
->builder
, pixel_offsets
,
1199 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1205 emit_fetch_constant(
1206 struct lp_build_tgsi_context
* bld_base
,
1207 const struct tgsi_full_src_register
* reg
,
1208 enum tgsi_opcode_type stype
,
1211 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1212 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1213 LLVMBuilderRef builder
= gallivm
->builder
;
1214 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
1215 unsigned dimension
= 0;
1216 LLVMValueRef consts_ptr
;
1217 LLVMValueRef num_consts
;
1220 /* XXX: Handle fetching xyzw components as a vector */
1221 assert(swizzle
!= ~0);
1223 if (reg
->Register
.Dimension
) {
1224 assert(!reg
->Dimension
.Indirect
);
1225 dimension
= reg
->Dimension
.Index
;
1226 assert(dimension
< LP_MAX_TGSI_CONST_BUFFERS
);
1229 consts_ptr
= bld
->consts
[dimension
];
1230 num_consts
= bld
->consts_sizes
[dimension
];
1232 if (reg
->Register
.Indirect
) {
1233 LLVMValueRef indirect_index
;
1234 LLVMValueRef swizzle_vec
=
1235 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
1236 LLVMValueRef index_vec
; /* index into the const buffer */
1237 LLVMValueRef overflow_mask
;
1239 indirect_index
= get_indirect_index(bld
,
1241 reg
->Register
.Index
,
1244 /* All fetches are from the same constant buffer, so
1245 * we need to propagate the size to a vector to do a
1246 * vector comparison */
1247 num_consts
= lp_build_broadcast_scalar(uint_bld
, num_consts
);
1248 /* Construct a boolean vector telling us which channels
1249 * overflow the bound constant buffer */
1250 overflow_mask
= LLVMBuildICmp(builder
, LLVMIntUGE
,
1254 /* index_vec = indirect_index * 4 + swizzle */
1255 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1256 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
1258 /* Gather values from the constant buffer */
1259 res
= build_gather(&bld_base
->base
, consts_ptr
, index_vec
,
1263 LLVMValueRef index
; /* index into the const buffer */
1264 LLVMValueRef scalar
, scalar_ptr
;
1266 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1268 scalar_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1270 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1271 res
= lp_build_broadcast_scalar(&bld_base
->base
, scalar
);
1274 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
) {
1275 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1276 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1283 emit_fetch_immediate(
1284 struct lp_build_tgsi_context
* bld_base
,
1285 const struct tgsi_full_src_register
* reg
,
1286 enum tgsi_opcode_type stype
,
1289 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1290 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1291 LLVMBuilderRef builder
= gallivm
->builder
;
1292 LLVMValueRef res
= NULL
;
1294 if (bld
->use_immediates_array
|| reg
->Register
.Indirect
) {
1295 LLVMValueRef imms_array
;
1296 LLVMTypeRef fptr_type
;
1298 /* cast imms_array pointer to float* */
1299 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1300 imms_array
= LLVMBuildBitCast(builder
, bld
->imms_array
, fptr_type
, "");
1302 if (reg
->Register
.Indirect
) {
1303 LLVMValueRef indirect_index
;
1304 LLVMValueRef index_vec
; /* index into the immediate register array */
1306 indirect_index
= get_indirect_index(bld
,
1308 reg
->Register
.Index
,
1311 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1312 * immediates are stored as full vectors (FIXME??? - might be better
1313 * to store them the same as constants) but all elements are the same
1316 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1321 /* Gather values from the immediate register array */
1322 res
= build_gather(&bld_base
->base
, imms_array
, index_vec
, NULL
);
1324 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1325 reg
->Register
.Index
* 4 + swizzle
);
1326 LLVMValueRef imms_ptr
= LLVMBuildGEP(builder
,
1327 bld
->imms_array
, &lindex
, 1, "");
1328 res
= LLVMBuildLoad(builder
, imms_ptr
, "");
1332 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
1335 if (stype
== TGSI_TYPE_UNSIGNED
) {
1336 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1337 } else if (stype
== TGSI_TYPE_SIGNED
) {
1338 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1345 struct lp_build_tgsi_context
* bld_base
,
1346 const struct tgsi_full_src_register
* reg
,
1347 enum tgsi_opcode_type stype
,
1350 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1351 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1352 LLVMBuilderRef builder
= gallivm
->builder
;
1355 if (reg
->Register
.Indirect
) {
1356 LLVMValueRef indirect_index
;
1357 LLVMValueRef index_vec
; /* index into the input reg array */
1358 LLVMValueRef inputs_array
;
1359 LLVMTypeRef fptr_type
;
1361 indirect_index
= get_indirect_index(bld
,
1363 reg
->Register
.Index
,
1366 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1371 /* cast inputs_array pointer to float* */
1372 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1373 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
, fptr_type
, "");
1375 /* Gather values from the input register array */
1376 res
= build_gather(&bld_base
->base
, inputs_array
, index_vec
, NULL
);
1378 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
1379 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1380 reg
->Register
.Index
* 4 + swizzle
);
1381 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
1382 bld
->inputs_array
, &lindex
, 1, "");
1383 res
= LLVMBuildLoad(builder
, input_ptr
, "");
1386 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
1392 if (stype
== TGSI_TYPE_UNSIGNED
) {
1393 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1394 } else if (stype
== TGSI_TYPE_SIGNED
) {
1395 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1403 emit_fetch_gs_input(
1404 struct lp_build_tgsi_context
* bld_base
,
1405 const struct tgsi_full_src_register
* reg
,
1406 enum tgsi_opcode_type stype
,
1409 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1410 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1411 LLVMBuilderRef builder
= gallivm
->builder
;
1412 LLVMValueRef attrib_index
= NULL
;
1413 LLVMValueRef vertex_index
= NULL
;
1414 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
);
1417 if (reg
->Register
.Indirect
) {
1418 attrib_index
= get_indirect_index(bld
,
1420 reg
->Register
.Index
,
1423 attrib_index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
);
1426 if (reg
->Dimension
.Indirect
) {
1427 vertex_index
= get_indirect_index(bld
,
1429 reg
->Dimension
.Index
,
1432 vertex_index
= lp_build_const_int32(gallivm
, reg
->Dimension
.Index
);
1435 res
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1436 reg
->Dimension
.Indirect
,
1438 reg
->Register
.Indirect
,
1444 if (stype
== TGSI_TYPE_UNSIGNED
) {
1445 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1446 } else if (stype
== TGSI_TYPE_SIGNED
) {
1447 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1454 emit_fetch_temporary(
1455 struct lp_build_tgsi_context
* bld_base
,
1456 const struct tgsi_full_src_register
* reg
,
1457 enum tgsi_opcode_type stype
,
1460 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1461 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1462 LLVMBuilderRef builder
= gallivm
->builder
;
1465 if (reg
->Register
.Indirect
) {
1466 LLVMValueRef indirect_index
;
1467 LLVMValueRef index_vec
; /* index into the temp reg array */
1468 LLVMValueRef temps_array
;
1469 LLVMTypeRef fptr_type
;
1471 indirect_index
= get_indirect_index(bld
,
1473 reg
->Register
.Index
,
1476 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1481 /* cast temps_array pointer to float* */
1482 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1483 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1485 /* Gather values from the temporary register array */
1486 res
= build_gather(&bld_base
->base
, temps_array
, index_vec
, NULL
);
1489 LLVMValueRef temp_ptr
;
1490 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
1491 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1494 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
) {
1495 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1496 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1503 emit_fetch_system_value(
1504 struct lp_build_tgsi_context
* bld_base
,
1505 const struct tgsi_full_src_register
* reg
,
1506 enum tgsi_opcode_type stype
,
1509 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1510 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1511 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1512 LLVMBuilderRef builder
= gallivm
->builder
;
1514 enum tgsi_opcode_type atype
; // Actual type of the value
1516 assert(!reg
->Register
.Indirect
);
1518 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
1519 case TGSI_SEMANTIC_INSTANCEID
:
1520 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
1521 atype
= TGSI_TYPE_UNSIGNED
;
1524 case TGSI_SEMANTIC_VERTEXID
:
1525 res
= bld
->system_values
.vertex_id
;
1526 atype
= TGSI_TYPE_UNSIGNED
;
1529 case TGSI_SEMANTIC_PRIMID
:
1530 res
= bld
->system_values
.prim_id
;
1531 atype
= TGSI_TYPE_UNSIGNED
;
1535 assert(!"unexpected semantic in emit_fetch_system_value");
1536 res
= bld_base
->base
.zero
;
1537 atype
= TGSI_TYPE_FLOAT
;
1541 if (atype
!= stype
) {
1542 if (stype
== TGSI_TYPE_FLOAT
) {
1543 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1544 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1545 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1546 } else if (stype
== TGSI_TYPE_SIGNED
) {
1547 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1555 * Register fetch with derivatives.
1559 struct lp_build_tgsi_soa_context
*bld
,
1568 /* TODO: use interpolation coeffs for inputs */
1571 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
1574 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
1582 emit_fetch_predicate(
1583 struct lp_build_tgsi_soa_context
*bld
,
1584 const struct tgsi_full_instruction
*inst
,
1587 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1589 unsigned char swizzles
[4];
1590 LLVMValueRef unswizzled
[4] = {NULL
, NULL
, NULL
, NULL
};
1594 if (!inst
->Instruction
.Predicate
) {
1595 TGSI_FOR_EACH_CHANNEL( chan
) {
1601 swizzles
[0] = inst
->Predicate
.SwizzleX
;
1602 swizzles
[1] = inst
->Predicate
.SwizzleY
;
1603 swizzles
[2] = inst
->Predicate
.SwizzleZ
;
1604 swizzles
[3] = inst
->Predicate
.SwizzleW
;
1606 index
= inst
->Predicate
.Index
;
1607 assert(index
< LP_MAX_TGSI_PREDS
);
1609 TGSI_FOR_EACH_CHANNEL( chan
) {
1610 unsigned swizzle
= swizzles
[chan
];
1613 * Only fetch the predicate register channels that are actually listed
1616 if (!unswizzled
[swizzle
]) {
1617 value
= LLVMBuildLoad(builder
,
1618 bld
->preds
[index
][swizzle
], "");
1621 * Convert the value to an integer mask.
1623 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
1624 * is needlessly causing two comparisons due to storing the intermediate
1625 * result as float vector instead of an integer mask vector.
1627 value
= lp_build_compare(bld
->bld_base
.base
.gallivm
,
1628 bld
->bld_base
.base
.type
,
1631 bld
->bld_base
.base
.zero
);
1632 if (inst
->Predicate
.Negate
) {
1633 value
= LLVMBuildNot(builder
, value
, "");
1636 unswizzled
[swizzle
] = value
;
1638 value
= unswizzled
[swizzle
];
1651 struct lp_build_tgsi_context
*bld_base
,
1652 const struct tgsi_full_instruction
*inst
,
1654 unsigned chan_index
,
1658 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1659 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1660 LLVMBuilderRef builder
= gallivm
->builder
;
1661 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
1662 struct lp_build_context
*float_bld
= &bld_base
->base
;
1663 struct lp_build_context
*int_bld
= &bld_base
->int_bld
;
1664 LLVMValueRef indirect_index
= NULL
;
1665 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
);
1670 * It is always assumed to be float.
1672 switch( inst
->Instruction
.Saturate
) {
1676 case TGSI_SAT_ZERO_ONE
:
1677 assert(dtype
== TGSI_TYPE_FLOAT
||
1678 dtype
== TGSI_TYPE_UNTYPED
);
1679 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1680 value
= lp_build_clamp_zero_one_nanzero(float_bld
, value
);
1683 case TGSI_SAT_MINUS_PLUS_ONE
:
1684 assert(dtype
== TGSI_TYPE_FLOAT
||
1685 dtype
== TGSI_TYPE_UNTYPED
);
1686 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1687 /* This will give -1.0 for NaN which is probably not what we want. */
1688 value
= lp_build_max_ext(float_bld
, value
,
1689 lp_build_const_vec(gallivm
, float_bld
->type
, -1.0),
1690 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN
);
1691 value
= lp_build_min(float_bld
, value
, float_bld
->one
);
1698 if (reg
->Register
.Indirect
) {
1699 indirect_index
= get_indirect_index(bld
,
1701 reg
->Register
.Index
,
1704 assert(reg
->Register
.Index
<=
1705 bld_base
->info
->file_max
[reg
->Register
.File
]);
1708 if (DEBUG_EXECUTION
) {
1709 emit_dump_reg(gallivm
, reg
->Register
.File
, reg
->Register
.Index
, chan_index
, value
);
1712 switch( reg
->Register
.File
) {
1713 case TGSI_FILE_OUTPUT
:
1714 /* Outputs are always stored as floats */
1715 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1717 if (reg
->Register
.Indirect
) {
1718 LLVMValueRef index_vec
; /* indexes into the output registers */
1719 LLVMValueRef outputs_array
;
1720 LLVMTypeRef fptr_type
;
1722 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1727 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1728 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
, fptr_type
, "");
1730 /* Scatter store values into output registers */
1731 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1732 &bld
->exec_mask
, pred
);
1735 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1737 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, pred
, value
, out_ptr
);
1741 case TGSI_FILE_TEMPORARY
:
1742 /* Temporaries are always stored as floats */
1743 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1745 if (reg
->Register
.Indirect
) {
1746 LLVMValueRef index_vec
; /* indexes into the temp registers */
1747 LLVMValueRef temps_array
;
1748 LLVMTypeRef fptr_type
;
1750 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1755 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1756 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1758 /* Scatter store values into temp registers */
1759 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1760 &bld
->exec_mask
, pred
);
1763 LLVMValueRef temp_ptr
;
1764 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, chan_index
);
1765 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, pred
, value
, temp_ptr
);
1769 case TGSI_FILE_ADDRESS
:
1770 assert(dtype
== TGSI_TYPE_SIGNED
);
1771 assert(LLVMTypeOf(value
) == int_bld
->vec_type
);
1772 value
= LLVMBuildBitCast(builder
, value
, int_bld
->vec_type
, "");
1773 lp_exec_mask_store(&bld
->exec_mask
, int_bld
, pred
, value
,
1774 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1777 case TGSI_FILE_PREDICATE
:
1778 assert(LLVMTypeOf(value
) == float_bld
->vec_type
);
1779 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1780 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, pred
, value
,
1781 bld
->preds
[reg
->Register
.Index
][chan_index
]);
1792 * Called at the beginning of the translation of each TGSI instruction, to
1793 * emit some debug code.
1797 struct lp_build_tgsi_context
* bld_base
,
1798 const struct tgsi_full_instruction
* inst
,
1799 const struct tgsi_opcode_info
* info
)
1802 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1804 if (DEBUG_EXECUTION
) {
1806 * Dump the TGSI instruction.
1809 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1813 tgsi_dump_instruction_str(inst
, bld_base
->pc
, &buf
[2], sizeof buf
- 2);
1814 lp_build_printf(gallivm
, buf
);
1816 /* Dump the execution mask.
1818 if (bld
->exec_mask
.has_mask
) {
1819 lp_build_print_value(gallivm
, " mask = ", bld
->exec_mask
.exec_mask
);
1826 struct lp_build_tgsi_context
* bld_base
,
1827 const struct tgsi_full_instruction
* inst
,
1828 const struct tgsi_opcode_info
* info
,
1829 LLVMValueRef dst
[4])
1832 unsigned chan_index
;
1833 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1836 LLVMValueRef pred
[TGSI_NUM_CHANNELS
];
1838 emit_fetch_predicate( bld
, inst
, pred
);
1840 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1841 emit_store_chan(bld_base
, inst
, 0, chan_index
, pred
[chan_index
], dst
[chan_index
]);
1847 tgsi_to_pipe_tex_target(unsigned tgsi_target
)
1849 switch (tgsi_target
) {
1850 case TGSI_TEXTURE_BUFFER
:
1852 case TGSI_TEXTURE_1D
:
1853 case TGSI_TEXTURE_SHADOW1D
:
1854 return PIPE_TEXTURE_1D
;
1855 case TGSI_TEXTURE_2D
:
1856 case TGSI_TEXTURE_SHADOW2D
:
1857 case TGSI_TEXTURE_2D_MSAA
:
1858 return PIPE_TEXTURE_2D
;
1859 case TGSI_TEXTURE_3D
:
1860 return PIPE_TEXTURE_3D
;
1861 case TGSI_TEXTURE_CUBE
:
1862 case TGSI_TEXTURE_SHADOWCUBE
:
1863 return PIPE_TEXTURE_CUBE
;
1864 case TGSI_TEXTURE_RECT
:
1865 case TGSI_TEXTURE_SHADOWRECT
:
1866 return PIPE_TEXTURE_RECT
;
1867 case TGSI_TEXTURE_1D_ARRAY
:
1868 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1869 return PIPE_TEXTURE_1D_ARRAY
;
1870 case TGSI_TEXTURE_2D_ARRAY
:
1871 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1872 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
1873 return PIPE_TEXTURE_2D_ARRAY
;
1874 case TGSI_TEXTURE_CUBE_ARRAY
:
1875 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1876 return PIPE_TEXTURE_CUBE_ARRAY
;
1884 static enum lp_sampler_lod_property
1885 lp_build_lod_property(
1886 struct lp_build_tgsi_context
*bld_base
,
1887 const struct tgsi_full_instruction
*inst
,
1890 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
1891 enum lp_sampler_lod_property lod_property
;
1894 * Not much we can do here. We could try catching inputs declared
1895 * with constant interpolation but not sure it's worth it - since for
1896 * TEX opcodes as well as FETCH/LD the lod comes from same reg as
1897 * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO), just
1898 * like the constant/immediate recognition below.
1899 * What seems to be of more value would be to recognize temps holding
1900 * broadcasted scalars but no way we can do it.
1901 * Tried asking llvm but without any success (using LLVMIsConstant
1902 * even though this isn't exactly what we'd need), even as simple as
1903 * IMM[0] UINT32 (0,-1,0,0)
1904 * MOV TEMP[0] IMM[0].yyyy
1905 * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
1907 * This means there's ZERO chance this will ever catch a scalar lod
1908 * with traditional tex opcodes as well as texel fetches, since the lod
1909 * comes from the same reg as coords (except some test shaders using
1910 * constant coords maybe).
1911 * There's at least hope for sample opcodes as well as size queries.
1913 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
||
1914 reg
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
1915 lod_property
= LP_SAMPLER_LOD_SCALAR
;
1917 else if (bld_base
->info
->processor
== TGSI_PROCESSOR_FRAGMENT
) {
1918 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
1919 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
1922 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
1926 /* never use scalar (per-quad) lod the results are just too wrong. */
1927 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
1929 return lod_property
;
1934 * High-level instruction translators.
1938 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
1939 const struct tgsi_full_instruction
*inst
,
1940 enum lp_build_tex_modifier modifier
,
1941 LLVMValueRef
*texel
,
1942 unsigned sampler_reg
)
1944 unsigned unit
= inst
->Src
[sampler_reg
].Register
.Index
;
1945 LLVMValueRef lod_bias
, explicit_lod
;
1946 LLVMValueRef oow
= NULL
;
1947 LLVMValueRef coords
[5];
1948 LLVMValueRef offsets
[3] = { NULL
};
1949 struct lp_derivatives derivs
;
1950 struct lp_derivatives
*deriv_ptr
= NULL
;
1951 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
1952 unsigned num_derivs
, num_offsets
, i
;
1953 unsigned shadow_coord
= 0;
1954 unsigned layer_coord
= 0;
1956 if (!bld
->sampler
) {
1957 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1958 for (i
= 0; i
< 4; i
++) {
1959 texel
[i
] = bld
->bld_base
.base
.undef
;
1964 switch (inst
->Texture
.Texture
) {
1965 case TGSI_TEXTURE_1D_ARRAY
:
1968 case TGSI_TEXTURE_1D
:
1972 case TGSI_TEXTURE_2D_ARRAY
:
1975 case TGSI_TEXTURE_2D
:
1976 case TGSI_TEXTURE_RECT
:
1980 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1983 case TGSI_TEXTURE_SHADOW1D
:
1988 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1994 case TGSI_TEXTURE_SHADOW2D
:
1995 case TGSI_TEXTURE_SHADOWRECT
:
2000 case TGSI_TEXTURE_CUBE
:
2004 case TGSI_TEXTURE_3D
:
2008 case TGSI_TEXTURE_SHADOWCUBE
:
2013 case TGSI_TEXTURE_CUBE_ARRAY
:
2018 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2022 shadow_coord
= 4; /* shadow coord special different reg */
2024 case TGSI_TEXTURE_2D_MSAA
:
2025 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2031 /* Note lod and especially projected are illegal in a LOT of cases */
2032 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2033 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2035 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
2036 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
) {
2037 /* note that shadow cube array with bias/explicit lod does not exist */
2038 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2041 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2043 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2045 explicit_lod
= NULL
;
2047 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2051 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2055 explicit_lod
= NULL
;
2058 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
2059 oow
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2060 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
2063 for (i
= 0; i
< num_derivs
; i
++) {
2064 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2065 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2066 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
2068 for (i
= num_derivs
; i
< 5; i
++) {
2069 coords
[i
] = bld
->bld_base
.base
.undef
;
2072 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2074 if (layer_coord
== 3) {
2075 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2078 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2080 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2081 coords
[2] = lp_build_mul(&bld
->bld_base
.base
, coords
[2], oow
);
2083 /* Shadow coord occupies always 5th slot. */
2085 if (shadow_coord
== 4) {
2086 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2089 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, shadow_coord
);
2091 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2092 coords
[4] = lp_build_mul(&bld
->bld_base
.base
, coords
[4], oow
);
2095 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2097 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2098 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, dim
);
2099 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 2, dim
);
2101 deriv_ptr
= &derivs
;
2103 * could also check all src regs if constant but I doubt such
2104 * cases exist in practice.
2106 if (bld
->bld_base
.info
->processor
== TGSI_PROCESSOR_FRAGMENT
) {
2107 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
2108 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2111 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2115 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2119 /* some advanced gather instructions (txgo) would require 4 offsets */
2120 if (inst
->Texture
.NumOffsets
== 1) {
2122 for (dim
= 0; dim
< num_offsets
; dim
++) {
2123 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2127 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
2128 bld
->bld_base
.base
.gallivm
,
2129 bld
->bld_base
.base
.type
,
2135 lod_bias
, explicit_lod
, lod_property
,
2140 emit_sample(struct lp_build_tgsi_soa_context
*bld
,
2141 const struct tgsi_full_instruction
*inst
,
2142 enum lp_build_tex_modifier modifier
,
2144 LLVMValueRef
*texel
)
2146 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2147 unsigned texture_unit
, sampler_unit
;
2148 LLVMValueRef lod_bias
, explicit_lod
;
2149 LLVMValueRef coords
[5];
2150 LLVMValueRef offsets
[3] = { NULL
};
2151 struct lp_derivatives derivs
;
2152 struct lp_derivatives
*deriv_ptr
= NULL
;
2153 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2155 unsigned num_offsets
, num_derivs
, i
;
2156 unsigned layer_coord
= 0;
2158 if (!bld
->sampler
) {
2159 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2160 for (i
= 0; i
< 4; i
++) {
2161 texel
[i
] = bld
->bld_base
.base
.undef
;
2167 * unlike old-style tex opcodes the texture/sampler indices
2168 * always come from src1 and src2 respectively.
2170 texture_unit
= inst
->Src
[1].Register
.Index
;
2171 sampler_unit
= inst
->Src
[2].Register
.Index
;
2174 * Note inst->Texture.Texture will contain the number of offsets,
2175 * however the target information is NOT there and comes from the
2176 * declared sampler views instead.
2178 switch (bld
->sv
[texture_unit
].Resource
) {
2179 case TGSI_TEXTURE_1D
:
2183 case TGSI_TEXTURE_1D_ARRAY
:
2188 case TGSI_TEXTURE_2D
:
2189 case TGSI_TEXTURE_RECT
:
2193 case TGSI_TEXTURE_2D_ARRAY
:
2198 case TGSI_TEXTURE_CUBE
:
2202 case TGSI_TEXTURE_3D
:
2206 case TGSI_TEXTURE_CUBE_ARRAY
:
2216 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2217 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2218 LLVMValueRef lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2219 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2221 explicit_lod
= NULL
;
2223 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2227 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2229 else if (modifier
== LP_BLD_TEX_MODIFIER_LOD_ZERO
) {
2231 /* XXX might be better to explicitly pass the level zero information */
2232 explicit_lod
= lp_build_const_vec(gallivm
, bld
->bld_base
.base
.type
, 0.0F
);
2236 explicit_lod
= NULL
;
2239 for (i
= 0; i
< num_derivs
; i
++) {
2240 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2242 for (i
= num_derivs
; i
< 5; i
++) {
2243 coords
[i
] = bld
->bld_base
.base
.undef
;
2246 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2248 if (layer_coord
== 3)
2249 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2251 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2253 /* Shadow coord occupies always 5th slot. */
2255 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2258 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2260 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2261 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, dim
);
2262 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 4, dim
);
2264 deriv_ptr
= &derivs
;
2266 * could also check all src regs if constant but I doubt such
2267 * cases exist in practice.
2269 if (bld
->bld_base
.info
->processor
== TGSI_PROCESSOR_FRAGMENT
) {
2270 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
2271 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2274 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2278 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2282 /* some advanced gather instructions (txgo) would require 4 offsets */
2283 if (inst
->Texture
.NumOffsets
== 1) {
2285 for (dim
= 0; dim
< num_offsets
; dim
++) {
2286 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2290 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
2291 bld
->bld_base
.base
.gallivm
,
2292 bld
->bld_base
.base
.type
,
2294 texture_unit
, sampler_unit
,
2298 lod_bias
, explicit_lod
, lod_property
,
2301 if (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_RED
||
2302 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_GREEN
||
2303 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_BLUE
||
2304 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_ALPHA
) {
2305 unsigned char swizzles
[4];
2306 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2307 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2308 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2309 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2311 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2316 emit_fetch_texels( struct lp_build_tgsi_soa_context
*bld
,
2317 const struct tgsi_full_instruction
*inst
,
2318 LLVMValueRef
*texel
,
2321 unsigned unit
, target
;
2322 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
2323 LLVMValueRef explicit_lod
= NULL
;
2324 LLVMValueRef coords
[3];
2325 LLVMValueRef offsets
[3] = { NULL
};
2326 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2328 unsigned layer_coord
= 0;
2330 if (!bld
->sampler
) {
2331 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2332 for (i
= 0; i
< 4; i
++) {
2333 texel
[i
] = coord_undef
;
2338 unit
= inst
->Src
[1].Register
.Index
;
2341 target
= bld
->sv
[unit
].Resource
;
2344 target
= inst
->Texture
.Texture
;
2348 case TGSI_TEXTURE_1D
:
2349 case TGSI_TEXTURE_BUFFER
:
2352 case TGSI_TEXTURE_1D_ARRAY
:
2356 case TGSI_TEXTURE_2D
:
2357 case TGSI_TEXTURE_RECT
:
2360 case TGSI_TEXTURE_2D_ARRAY
:
2364 case TGSI_TEXTURE_3D
:
2372 /* always have lod except for buffers ? */
2373 if (target
!= TGSI_TEXTURE_BUFFER
) {
2374 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2375 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2378 for (i
= 0; i
< dims
; i
++) {
2379 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2381 for (i
= dims
; i
< 3; i
++) {
2382 coords
[i
] = coord_undef
;
2385 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2387 if (inst
->Texture
.NumOffsets
== 1) {
2389 for (dim
= 0; dim
< dims
; dim
++) {
2390 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2394 bld
->sampler
->emit_fetch_texel(bld
->sampler
,
2395 bld
->bld_base
.base
.gallivm
,
2396 bld
->bld_base
.base
.type
,
2402 NULL
, explicit_lod
, lod_property
,
2406 (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_RED
||
2407 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_GREEN
||
2408 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_BLUE
||
2409 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_ALPHA
)) {
2410 unsigned char swizzles
[4];
2411 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2412 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2413 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2414 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2416 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2421 emit_size_query( struct lp_build_tgsi_soa_context
*bld
,
2422 const struct tgsi_full_instruction
*inst
,
2423 LLVMValueRef
*sizes_out
,
2424 boolean is_sviewinfo
)
2426 LLVMValueRef explicit_lod
;
2427 enum lp_sampler_lod_property lod_property
;
2430 unsigned unit
= inst
->Src
[1].Register
.Index
;
2431 unsigned target
, pipe_target
;
2434 target
= bld
->sv
[unit
].Resource
;
2437 target
= inst
->Texture
.Texture
;
2440 case TGSI_TEXTURE_BUFFER
:
2441 case TGSI_TEXTURE_RECT
:
2442 case TGSI_TEXTURE_SHADOWRECT
:
2450 if (!bld
->sampler
) {
2451 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
2452 for (i
= 0; i
< 4; i
++)
2453 sizes_out
[i
] = bld
->bld_base
.int_bld
.undef
;
2458 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 0);
2459 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2462 explicit_lod
= NULL
;
2463 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2467 pipe_target
= tgsi_to_pipe_tex_target(target
);
2469 bld
->sampler
->emit_size_query(bld
->sampler
,
2470 bld
->bld_base
.base
.gallivm
,
2471 bld
->bld_base
.int_bld
.type
,
2480 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
2485 for (i
= 0; i
< 5; i
++) {
2488 if (pc
+ i
>= bld
->bld_base
.info
->num_instructions
)
2491 opcode
= bld
->bld_base
.instructions
[pc
+ i
].Instruction
.Opcode
;
2493 if (opcode
== TGSI_OPCODE_END
)
2496 if (opcode
== TGSI_OPCODE_TEX
||
2497 opcode
== TGSI_OPCODE_TXP
||
2498 opcode
== TGSI_OPCODE_TXD
||
2499 opcode
== TGSI_OPCODE_TXB
||
2500 opcode
== TGSI_OPCODE_TXL
||
2501 opcode
== TGSI_OPCODE_TXF
||
2502 opcode
== TGSI_OPCODE_TXQ
||
2503 opcode
== TGSI_OPCODE_TEX2
||
2504 opcode
== TGSI_OPCODE_TXB2
||
2505 opcode
== TGSI_OPCODE_TXL2
||
2506 opcode
== TGSI_OPCODE_SAMPLE
||
2507 opcode
== TGSI_OPCODE_SAMPLE_B
||
2508 opcode
== TGSI_OPCODE_SAMPLE_C
||
2509 opcode
== TGSI_OPCODE_SAMPLE_C_LZ
||
2510 opcode
== TGSI_OPCODE_SAMPLE_D
||
2511 opcode
== TGSI_OPCODE_SAMPLE_I
||
2512 opcode
== TGSI_OPCODE_SAMPLE_L
||
2513 opcode
== TGSI_OPCODE_SVIEWINFO
||
2514 opcode
== TGSI_OPCODE_CAL
||
2515 opcode
== TGSI_OPCODE_CALLNZ
||
2516 opcode
== TGSI_OPCODE_IF
||
2517 opcode
== TGSI_OPCODE_UIF
||
2518 opcode
== TGSI_OPCODE_BGNLOOP
||
2519 opcode
== TGSI_OPCODE_SWITCH
)
2529 * Kill fragment if any of the src register values are negative.
2533 struct lp_build_tgsi_soa_context
*bld
,
2534 const struct tgsi_full_instruction
*inst
,
2537 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2538 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
2539 LLVMValueRef terms
[TGSI_NUM_CHANNELS
];
2541 unsigned chan_index
;
2543 memset(&terms
, 0, sizeof terms
);
2545 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2548 /* Unswizzle channel */
2549 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
2551 /* Check if the component has not been already tested. */
2552 assert(swizzle
< TGSI_NUM_CHANNELS
);
2553 if( !terms
[swizzle
] )
2554 /* TODO: change the comparison operator instead of setting the sign */
2555 terms
[swizzle
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, chan_index
);
2559 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2560 if(terms
[chan_index
]) {
2561 LLVMValueRef chan_mask
;
2564 * If term < 0 then mask = 0 else mask = ~0.
2566 chan_mask
= lp_build_cmp(&bld
->bld_base
.base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->bld_base
.base
.zero
);
2569 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
2575 if (bld
->exec_mask
.has_mask
) {
2576 LLVMValueRef invmask
;
2577 invmask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2578 mask
= LLVMBuildOr(builder
, mask
, invmask
, "");
2581 lp_build_mask_update(bld
->mask
, mask
);
2582 if (!near_end_of_shader(bld
, pc
))
2583 lp_build_mask_check(bld
->mask
);
2588 * Unconditional fragment kill.
2589 * The only predication is the execution mask which will apply if
2590 * we're inside a loop or conditional.
2593 emit_kill(struct lp_build_tgsi_soa_context
*bld
,
2596 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2599 /* For those channels which are "alive", disable fragment shader
2602 if (bld
->exec_mask
.has_mask
) {
2603 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2606 LLVMValueRef zero
= LLVMConstNull(bld
->bld_base
.base
.int_vec_type
);
2610 lp_build_mask_update(bld
->mask
, mask
);
2612 if (!near_end_of_shader(bld
, pc
))
2613 lp_build_mask_check(bld
->mask
);
2618 * Emit code which will dump the value of all the temporary registers
2622 emit_dump_file(struct lp_build_tgsi_soa_context
*bld
,
2625 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
2626 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2627 LLVMBuilderRef builder
= gallivm
->builder
;
2628 LLVMValueRef reg_ptr
;
2630 int max_index
= info
->file_max
[file
];
2633 * Some register files, particularly constants, can be very large,
2634 * and dumping everything could make this unusably slow.
2636 max_index
= MIN2(max_index
, 32);
2638 for (index
= 0; index
<= max_index
; index
++) {
2643 if (index
< 8 * sizeof(unsigned) &&
2644 (info
->file_mask
[file
] & (1 << index
)) == 0) {
2645 /* This was not declared.*/
2649 if (file
== TGSI_FILE_INPUT
) {
2650 mask
= info
->input_usage_mask
[index
];
2652 mask
= TGSI_WRITEMASK_XYZW
;
2655 for (chan
= 0; chan
< 4; chan
++) {
2656 if ((mask
& (1 << chan
)) == 0) {
2657 /* This channel is not used.*/
2661 if (file
== TGSI_FILE_CONSTANT
) {
2662 struct tgsi_full_src_register reg
;
2663 memset(®
, 0, sizeof reg
);
2664 reg
.Register
.File
= file
;
2665 reg
.Register
.Index
= index
;
2666 reg
.Register
.SwizzleX
= 0;
2667 reg
.Register
.SwizzleY
= 1;
2668 reg
.Register
.SwizzleZ
= 2;
2669 reg
.Register
.SwizzleW
= 3;
2671 res
= bld
->bld_base
.emit_fetch_funcs
[file
](&bld
->bld_base
, ®
, TGSI_TYPE_FLOAT
, chan
);
2675 } else if (file
== TGSI_FILE_INPUT
) {
2676 res
= bld
->inputs
[index
][chan
];
2680 } else if (file
== TGSI_FILE_TEMPORARY
) {
2681 reg_ptr
= lp_get_temp_ptr_soa(bld
, index
, chan
);
2683 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2684 } else if (file
== TGSI_FILE_OUTPUT
) {
2685 reg_ptr
= lp_get_output_ptr(bld
, index
, chan
);
2687 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2693 emit_dump_reg(gallivm
, file
, index
, chan
, res
);
2701 lp_emit_declaration_soa(
2702 struct lp_build_tgsi_context
*bld_base
,
2703 const struct tgsi_full_declaration
*decl
)
2705 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2706 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2707 LLVMTypeRef vec_type
= bld
->bld_base
.base
.vec_type
;
2708 const unsigned first
= decl
->Range
.First
;
2709 const unsigned last
= decl
->Range
.Last
;
2712 assert(last
<= bld
->bld_base
.info
->file_max
[decl
->Declaration
.File
]);
2714 switch (decl
->Declaration
.File
) {
2715 case TGSI_FILE_TEMPORARY
:
2716 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
2717 assert(last
< LP_MAX_INLINED_TEMPS
);
2718 for (idx
= first
; idx
<= last
; ++idx
) {
2719 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2720 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
2725 case TGSI_FILE_OUTPUT
:
2726 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
2727 for (idx
= first
; idx
<= last
; ++idx
) {
2728 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2729 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
2730 vec_type
, "output");
2735 case TGSI_FILE_ADDRESS
:
2736 /* ADDR registers are only allocated with an integer LLVM IR type,
2737 * as they are guaranteed to always have integers.
2738 * XXX: Not sure if this exception is worthwhile (or the whole idea of
2739 * an ADDR register for that matter).
2741 assert(last
< LP_MAX_TGSI_ADDRS
);
2742 for (idx
= first
; idx
<= last
; ++idx
) {
2743 assert(idx
< LP_MAX_TGSI_ADDRS
);
2744 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2745 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, bld_base
->base
.int_vec_type
, "addr");
2749 case TGSI_FILE_PREDICATE
:
2750 assert(last
< LP_MAX_TGSI_PREDS
);
2751 for (idx
= first
; idx
<= last
; ++idx
) {
2752 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2753 bld
->preds
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
,
2758 case TGSI_FILE_SAMPLER_VIEW
:
2760 * The target stored here MUST match whatever there actually
2761 * is in the set sampler views (what about return type?).
2763 assert(last
< PIPE_MAX_SHADER_SAMPLER_VIEWS
);
2764 for (idx
= first
; idx
<= last
; ++idx
) {
2765 bld
->sv
[idx
] = decl
->SamplerView
;
2769 case TGSI_FILE_CONSTANT
:
2772 * We could trivially fetch the per-buffer pointer when fetching the
2773 * constant, relying on llvm to figure out it's always the same pointer
2774 * anyway. However, doing so results in a huge (more than factor of 10)
2775 * slowdown in llvm compilation times for some (but not all) shaders
2776 * (more specifically, the IR optimization spends way more time in
2777 * DominatorTree::dominates). At least with llvm versions 3.1, 3.3.
2779 unsigned idx2D
= decl
->Dim
.Index2D
;
2780 LLVMValueRef index2D
= lp_build_const_int32(gallivm
, idx2D
);
2781 assert(idx2D
< LP_MAX_TGSI_CONST_BUFFERS
);
2782 bld
->consts
[idx2D
] =
2783 lp_build_array_get(gallivm
, bld
->consts_ptr
, index2D
);
2784 bld
->consts_sizes
[idx2D
] =
2785 lp_build_array_get(gallivm
, bld
->const_sizes_ptr
, index2D
);
2790 /* don't need to declare other vars */
2796 void lp_emit_immediate_soa(
2797 struct lp_build_tgsi_context
*bld_base
,
2798 const struct tgsi_full_immediate
*imm
)
2800 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2801 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
2802 LLVMValueRef imms
[4];
2804 const uint size
= imm
->Immediate
.NrTokens
- 1;
2806 switch (imm
->Immediate
.DataType
) {
2807 case TGSI_IMM_FLOAT32
:
2808 for( i
= 0; i
< size
; ++i
)
2810 lp_build_const_vec(gallivm
, bld_base
->base
.type
, imm
->u
[i
].Float
);
2813 case TGSI_IMM_UINT32
:
2814 for( i
= 0; i
< size
; ++i
) {
2815 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->uint_bld
.type
, imm
->u
[i
].Uint
);
2816 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
2820 case TGSI_IMM_INT32
:
2821 for( i
= 0; i
< size
; ++i
) {
2822 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->int_bld
.type
, imm
->u
[i
].Int
);
2823 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
2828 for( i
= size
; i
< 4; ++i
)
2829 imms
[i
] = bld_base
->base
.undef
;
2831 if (bld
->use_immediates_array
) {
2832 unsigned index
= bld
->num_immediates
;
2833 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2834 LLVMBuilderRef builder
= gallivm
->builder
;
2836 assert(bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
));
2837 for (i
= 0; i
< 4; ++i
) {
2838 LLVMValueRef lindex
= lp_build_const_int32(
2839 bld
->bld_base
.base
.gallivm
, index
* 4 + i
);
2840 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
2841 bld
->imms_array
, &lindex
, 1, "");
2842 LLVMBuildStore(builder
, imms
[i
], imm_ptr
);
2845 /* simply copy the immediate values into the next immediates[] slot */
2847 const uint size
= imm
->Immediate
.NrTokens
- 1;
2849 assert(bld
->num_immediates
< LP_MAX_INLINED_IMMEDIATES
);
2851 for(i
= 0; i
< 4; ++i
)
2852 bld
->immediates
[bld
->num_immediates
][i
] = imms
[i
];
2854 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
2855 unsigned index
= bld
->num_immediates
;
2856 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2857 LLVMBuilderRef builder
= gallivm
->builder
;
2858 for (i
= 0; i
< 4; ++i
) {
2859 LLVMValueRef lindex
= lp_build_const_int32(
2860 bld
->bld_base
.base
.gallivm
, index
* 4 + i
);
2861 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
2862 bld
->imms_array
, &lindex
, 1, "");
2863 LLVMBuildStore(builder
,
2864 bld
->immediates
[index
][i
],
2870 bld
->num_immediates
++;
2875 const struct lp_build_tgsi_action
* action
,
2876 struct lp_build_tgsi_context
* bld_base
,
2877 struct lp_build_emit_data
* emit_data
)
2879 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2881 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
,
2882 &emit_data
->output
[emit_data
->chan
], NULL
);
2887 const struct lp_build_tgsi_action
* action
,
2888 struct lp_build_tgsi_context
* bld_base
,
2889 struct lp_build_emit_data
* emit_data
)
2891 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2893 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
, NULL
,
2894 &emit_data
->output
[emit_data
->chan
]);
2899 const struct lp_build_tgsi_action
* action
,
2900 struct lp_build_tgsi_context
* bld_base
,
2901 struct lp_build_emit_data
* emit_data
)
2903 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2905 emit_kill(bld
, bld_base
->pc
- 1);
2910 const struct lp_build_tgsi_action
* action
,
2911 struct lp_build_tgsi_context
* bld_base
,
2912 struct lp_build_emit_data
* emit_data
)
2914 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2916 emit_kill_if(bld
, emit_data
->inst
, bld_base
->pc
- 1);
2921 const struct lp_build_tgsi_action
* action
,
2922 struct lp_build_tgsi_context
* bld_base
,
2923 struct lp_build_emit_data
* emit_data
)
2925 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2927 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
2928 emit_data
->output
, 1);
2933 const struct lp_build_tgsi_action
* action
,
2934 struct lp_build_tgsi_context
* bld_base
,
2935 struct lp_build_emit_data
* emit_data
)
2937 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2939 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
2940 emit_data
->output
, 2);
2945 const struct lp_build_tgsi_action
* action
,
2946 struct lp_build_tgsi_context
* bld_base
,
2947 struct lp_build_emit_data
* emit_data
)
2949 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2951 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
2952 emit_data
->output
, 1);
2957 const struct lp_build_tgsi_action
* action
,
2958 struct lp_build_tgsi_context
* bld_base
,
2959 struct lp_build_emit_data
* emit_data
)
2961 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2963 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
2964 emit_data
->output
, 2);
2969 const struct lp_build_tgsi_action
* action
,
2970 struct lp_build_tgsi_context
* bld_base
,
2971 struct lp_build_emit_data
* emit_data
)
2973 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2975 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
2976 emit_data
->output
, 3);
2981 const struct lp_build_tgsi_action
* action
,
2982 struct lp_build_tgsi_context
* bld_base
,
2983 struct lp_build_emit_data
* emit_data
)
2985 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2987 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
2988 emit_data
->output
, 1);
2993 const struct lp_build_tgsi_action
* action
,
2994 struct lp_build_tgsi_context
* bld_base
,
2995 struct lp_build_emit_data
* emit_data
)
2997 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
2999 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3000 emit_data
->output
, 2);
3005 const struct lp_build_tgsi_action
* action
,
3006 struct lp_build_tgsi_context
* bld_base
,
3007 struct lp_build_emit_data
* emit_data
)
3009 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3011 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_PROJECTED
,
3012 emit_data
->output
, 1);
3017 const struct lp_build_tgsi_action
* action
,
3018 struct lp_build_tgsi_context
* bld_base
,
3019 struct lp_build_emit_data
* emit_data
)
3021 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3023 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3028 const struct lp_build_tgsi_action
* action
,
3029 struct lp_build_tgsi_context
* bld_base
,
3030 struct lp_build_emit_data
* emit_data
)
3032 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3034 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3039 const struct lp_build_tgsi_action
* action
,
3040 struct lp_build_tgsi_context
* bld_base
,
3041 struct lp_build_emit_data
* emit_data
)
3043 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3045 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3050 const struct lp_build_tgsi_action
* action
,
3051 struct lp_build_tgsi_context
* bld_base
,
3052 struct lp_build_emit_data
* emit_data
)
3054 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3056 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3057 FALSE
, emit_data
->output
);
3062 const struct lp_build_tgsi_action
* action
,
3063 struct lp_build_tgsi_context
* bld_base
,
3064 struct lp_build_emit_data
* emit_data
)
3066 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3068 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3069 FALSE
, emit_data
->output
);
3074 const struct lp_build_tgsi_action
* action
,
3075 struct lp_build_tgsi_context
* bld_base
,
3076 struct lp_build_emit_data
* emit_data
)
3078 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3080 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3081 TRUE
, emit_data
->output
);
3086 const struct lp_build_tgsi_action
* action
,
3087 struct lp_build_tgsi_context
* bld_base
,
3088 struct lp_build_emit_data
* emit_data
)
3090 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3092 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_ZERO
,
3093 TRUE
, emit_data
->output
);
3098 const struct lp_build_tgsi_action
* action
,
3099 struct lp_build_tgsi_context
* bld_base
,
3100 struct lp_build_emit_data
* emit_data
)
3102 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3104 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3105 FALSE
, emit_data
->output
);
3110 const struct lp_build_tgsi_action
* action
,
3111 struct lp_build_tgsi_context
* bld_base
,
3112 struct lp_build_emit_data
* emit_data
)
3114 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3116 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3117 FALSE
, emit_data
->output
);
3122 const struct lp_build_tgsi_action
* action
,
3123 struct lp_build_tgsi_context
* bld_base
,
3124 struct lp_build_emit_data
* emit_data
)
3126 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3128 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3132 mask_vec(struct lp_build_tgsi_context
*bld_base
)
3134 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3135 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3136 struct lp_exec_mask
*exec_mask
= &bld
->exec_mask
;
3138 if (!exec_mask
->has_mask
) {
3139 return lp_build_mask_value(bld
->mask
);
3141 return LLVMBuildAnd(builder
, lp_build_mask_value(bld
->mask
),
3142 exec_mask
->exec_mask
, "");
3146 increment_vec_ptr_by_mask(struct lp_build_tgsi_context
* bld_base
,
3150 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3151 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3153 current_vec
= LLVMBuildSub(builder
, current_vec
, mask
, "");
3155 LLVMBuildStore(builder
, current_vec
, ptr
);
3159 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context
* bld_base
,
3163 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3164 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3166 current_vec
= lp_build_select(&bld_base
->uint_bld
,
3168 bld_base
->uint_bld
.zero
,
3171 LLVMBuildStore(builder
, current_vec
, ptr
);
3175 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context
* bld
,
3176 LLVMValueRef current_mask_vec
,
3177 LLVMValueRef total_emitted_vertices_vec
)
3179 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3180 struct lp_build_context
*int_bld
= &bld
->bld_base
.int_bld
;
3181 LLVMValueRef max_mask
= lp_build_cmp(int_bld
, PIPE_FUNC_LESS
,
3182 total_emitted_vertices_vec
,
3183 bld
->max_output_vertices_vec
);
3185 return LLVMBuildAnd(builder
, current_mask_vec
, max_mask
, "");
3190 const struct lp_build_tgsi_action
* action
,
3191 struct lp_build_tgsi_context
* bld_base
,
3192 struct lp_build_emit_data
* emit_data
)
3194 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3195 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3197 if (bld
->gs_iface
->emit_vertex
) {
3198 LLVMValueRef mask
= mask_vec(bld_base
);
3199 LLVMValueRef total_emitted_vertices_vec
=
3200 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3201 mask
= clamp_mask_to_max_output_vertices(bld
, mask
,
3202 total_emitted_vertices_vec
);
3203 gather_outputs(bld
);
3204 bld
->gs_iface
->emit_vertex(bld
->gs_iface
, &bld
->bld_base
,
3206 total_emitted_vertices_vec
);
3207 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3209 increment_vec_ptr_by_mask(bld_base
, bld
->total_emitted_vertices_vec_ptr
,
3212 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3213 " +++ emit vertex masked ones = ",
3215 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3216 " +++ emit vertex emitted = ",
3217 total_emitted_vertices_vec
);
3224 end_primitive_masked(struct lp_build_tgsi_context
* bld_base
,
3227 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3228 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3230 if (bld
->gs_iface
->end_primitive
) {
3231 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3232 LLVMValueRef emitted_vertices_vec
=
3233 LLVMBuildLoad(builder
, bld
->emitted_vertices_vec_ptr
, "");
3234 LLVMValueRef emitted_prims_vec
=
3235 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3237 LLVMValueRef emitted_mask
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3238 emitted_vertices_vec
,
3240 /* We need to combine the current execution mask with the mask
3241 telling us which, if any, execution slots actually have
3242 unemitted primitives, this way we make sure that end_primitives
3243 executes only on the paths that have unflushed vertices */
3244 mask
= LLVMBuildAnd(builder
, mask
, emitted_mask
, "");
3246 bld
->gs_iface
->end_primitive(bld
->gs_iface
, &bld
->bld_base
,
3247 emitted_vertices_vec
,
3251 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3252 " +++ end prim masked ones = ",
3254 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3255 " +++ end prim emitted verts1 = ",
3256 emitted_vertices_vec
);
3257 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3258 " +++ end prim emitted prims1 = ",
3259 LLVMBuildLoad(builder
,
3260 bld
->emitted_prims_vec_ptr
, ""));
3262 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_prims_vec_ptr
,
3264 clear_uint_vec_ptr_from_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3267 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3268 " +++ end prim emitted verts2 = ",
3269 LLVMBuildLoad(builder
,
3270 bld
->emitted_vertices_vec_ptr
, ""));
3278 const struct lp_build_tgsi_action
* action
,
3279 struct lp_build_tgsi_context
* bld_base
,
3280 struct lp_build_emit_data
* emit_data
)
3282 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3284 if (bld
->gs_iface
->end_primitive
) {
3285 LLVMValueRef mask
= mask_vec(bld_base
);
3286 end_primitive_masked(bld_base
, mask
);
3292 const struct lp_build_tgsi_action
* action
,
3293 struct lp_build_tgsi_context
* bld_base
,
3294 struct lp_build_emit_data
* emit_data
)
3296 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3298 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
3304 const struct lp_build_tgsi_action
* action
,
3305 struct lp_build_tgsi_context
* bld_base
,
3306 struct lp_build_emit_data
* emit_data
)
3308 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3310 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
3315 const struct lp_build_tgsi_action
* action
,
3316 struct lp_build_tgsi_context
* bld_base
,
3317 struct lp_build_emit_data
* emit_data
)
3319 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3321 lp_exec_break(&bld
->exec_mask
, bld_base
);
3326 const struct lp_build_tgsi_action
* action
,
3327 struct lp_build_tgsi_context
* bld_base
,
3328 struct lp_build_emit_data
* emit_data
)
3330 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3331 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3332 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3333 LLVMValueRef unsigned_cond
=
3334 LLVMBuildBitCast(builder
, emit_data
->args
[0], uint_bld
->vec_type
, "");
3335 LLVMValueRef cond
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3339 lp_exec_break_condition(&bld
->exec_mask
, cond
);
3344 const struct lp_build_tgsi_action
* action
,
3345 struct lp_build_tgsi_context
* bld_base
,
3346 struct lp_build_emit_data
* emit_data
)
3349 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3351 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
3352 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
3353 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3358 const struct lp_build_tgsi_action
* action
,
3359 struct lp_build_tgsi_context
* bld_base
,
3360 struct lp_build_emit_data
* emit_data
)
3363 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3364 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3366 tmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3367 emit_data
->args
[0], uint_bld
->zero
);
3368 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3373 const struct lp_build_tgsi_action
* action
,
3374 struct lp_build_tgsi_context
* bld_base
,
3375 struct lp_build_emit_data
* emit_data
)
3377 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3379 lp_exec_case(&bld
->exec_mask
, emit_data
->args
[0]);
3384 const struct lp_build_tgsi_action
* action
,
3385 struct lp_build_tgsi_context
* bld_base
,
3386 struct lp_build_emit_data
* emit_data
)
3388 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3390 lp_exec_default(&bld
->exec_mask
, bld_base
);
3395 const struct lp_build_tgsi_action
* action
,
3396 struct lp_build_tgsi_context
* bld_base
,
3397 struct lp_build_emit_data
* emit_data
)
3399 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3401 lp_exec_switch(&bld
->exec_mask
, emit_data
->args
[0]);
3406 const struct lp_build_tgsi_action
* action
,
3407 struct lp_build_tgsi_context
* bld_base
,
3408 struct lp_build_emit_data
* emit_data
)
3410 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3412 lp_exec_endswitch(&bld
->exec_mask
, bld_base
);
3417 const struct lp_build_tgsi_action
* action
,
3418 struct lp_build_tgsi_context
* bld_base
,
3419 struct lp_build_emit_data
* emit_data
)
3421 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3423 lp_exec_bgnloop(&bld
->exec_mask
);
3428 const struct lp_build_tgsi_action
* action
,
3429 struct lp_build_tgsi_context
* bld_base
,
3430 struct lp_build_emit_data
* emit_data
)
3432 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3434 lp_exec_mask_bgnsub(&bld
->exec_mask
);
3439 const struct lp_build_tgsi_action
* action
,
3440 struct lp_build_tgsi_context
* bld_base
,
3441 struct lp_build_emit_data
* emit_data
)
3443 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3445 lp_exec_mask_cond_invert(&bld
->exec_mask
);
3450 const struct lp_build_tgsi_action
* action
,
3451 struct lp_build_tgsi_context
* bld_base
,
3452 struct lp_build_emit_data
* emit_data
)
3454 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3456 lp_exec_mask_cond_pop(&bld
->exec_mask
);
3461 const struct lp_build_tgsi_action
* action
,
3462 struct lp_build_tgsi_context
* bld_base
,
3463 struct lp_build_emit_data
* emit_data
)
3465 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3467 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
3472 const struct lp_build_tgsi_action
* action
,
3473 struct lp_build_tgsi_context
* bld_base
,
3474 struct lp_build_emit_data
* emit_data
)
3476 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3478 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
3483 const struct lp_build_tgsi_action
* action
,
3484 struct lp_build_tgsi_context
* bld_base
,
3485 struct lp_build_emit_data
* emit_data
)
3487 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3489 lp_exec_continue(&bld
->exec_mask
);
3492 /* XXX: Refactor and move it to lp_bld_tgsi_action.c
3494 * XXX: What do the comments about xmm registers mean? Maybe they are left over
3495 * from old code, but there is no garauntee that LLVM will use those registers
3498 * XXX: There should be no calls to lp_build_emit_fetch in this function. This
3499 * should be handled by the emit_data->fetch_args function. */
3502 const struct lp_build_tgsi_action
* action
,
3503 struct lp_build_tgsi_context
* bld_base
,
3504 struct lp_build_emit_data
* emit_data
)
3506 LLVMValueRef tmp0
, tmp1
;
3507 LLVMValueRef tmp4
= NULL
;
3508 LLVMValueRef tmp5
= NULL
;
3509 LLVMValueRef tmp6
= NULL
;
3510 LLVMValueRef tmp7
= NULL
;
3511 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3513 uint dims
= (emit_data
->inst
->Instruction
.Opcode
== TGSI_OPCODE_NRM
) ? 3 : 4;
3515 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
) ||
3516 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
) ||
3517 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
) ||
3518 (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
) && dims
== 4)) {
3520 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
3523 /* xmm0 = src.x * src.x */
3524 tmp0
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_X
);
3525 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
)) {
3528 tmp0
= lp_build_mul( &bld
->bld_base
.base
, tmp0
, tmp0
);
3531 /* xmm0 = xmm0 + src.y * src.y */
3532 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_Y
);
3533 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
)) {
3536 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
3537 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
3540 /* xmm0 = xmm0 + src.z * src.z */
3541 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_Z
);
3542 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
)) {
3545 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
3546 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
3550 /* xmm0 = xmm0 + src.w * src.w */
3551 tmp1
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, TGSI_CHAN_W
);
3552 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
)) {
3555 tmp1
= lp_build_mul( &bld
->bld_base
.base
, tmp1
, tmp1
);
3556 tmp0
= lp_build_add( &bld
->bld_base
.base
, tmp0
, tmp1
);
3558 /* xmm1 = 1 / sqrt(xmm0) */
3559 tmp1
= lp_build_rsqrt( &bld
->bld_base
.base
, tmp0
);
3560 /* dst.x = xmm1 * src.x */
3561 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
)) {
3562 emit_data
->output
[TGSI_CHAN_X
] = lp_build_mul( &bld
->bld_base
.base
, tmp4
, tmp1
);
3564 /* dst.y = xmm1 * src.y */
3565 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Y
)) {
3566 emit_data
->output
[TGSI_CHAN_Y
] = lp_build_mul( &bld
->bld_base
.base
, tmp5
, tmp1
);
3569 /* dst.z = xmm1 * src.z */
3570 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_Z
)) {
3571 emit_data
->output
[TGSI_CHAN_Z
] = lp_build_mul( &bld
->bld_base
.base
, tmp6
, tmp1
);
3573 /* dst.w = xmm1 * src.w */
3574 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_X
) && dims
== 4) {
3575 emit_data
->output
[TGSI_CHAN_W
] = lp_build_mul( &bld
->bld_base
.base
, tmp7
, tmp1
);
3580 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data
->inst
, TGSI_CHAN_W
) && dims
== 3) {
3581 emit_data
->output
[TGSI_CHAN_W
] = bld
->bld_base
.base
.one
;
3585 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
3587 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3588 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3590 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
3591 LLVMValueRef array_size
=
3592 lp_build_const_int32(gallivm
,
3593 bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4);
3594 bld
->temps_array
= lp_build_array_alloca(gallivm
,
3595 bld_base
->base
.vec_type
, array_size
,
3599 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
3600 LLVMValueRef array_size
=
3601 lp_build_const_int32(gallivm
,
3602 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
3603 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
3604 bld_base
->base
.vec_type
, array_size
,
3608 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3609 LLVMValueRef array_size
=
3610 lp_build_const_int32(gallivm
,
3611 bld_base
->info
->file_max
[TGSI_FILE_IMMEDIATE
] * 4 + 4);
3612 bld
->imms_array
= lp_build_array_alloca(gallivm
,
3613 bld_base
->base
.vec_type
, array_size
,
3617 /* If we have indirect addressing in inputs we need to copy them into
3618 * our alloca array to be able to iterate over them */
3619 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
) && !bld
->gs_iface
) {
3620 unsigned index
, chan
;
3621 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
3622 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
3623 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
3624 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
3625 vec_type
, array_size
,
3628 assert(bld_base
->info
->num_inputs
3629 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
3631 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
3632 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
3633 LLVMValueRef lindex
=
3634 lp_build_const_int32(gallivm
, index
* 4 + chan
);
3635 LLVMValueRef input_ptr
=
3636 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
3638 LLVMValueRef value
= bld
->inputs
[index
][chan
];
3640 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
3645 if (bld
->gs_iface
) {
3646 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
3647 bld
->emitted_prims_vec_ptr
=
3648 lp_build_alloca(gallivm
,
3650 "emitted_prims_ptr");
3651 bld
->emitted_vertices_vec_ptr
=
3652 lp_build_alloca(gallivm
,
3654 "emitted_vertices_ptr");
3655 bld
->total_emitted_vertices_vec_ptr
=
3656 lp_build_alloca(gallivm
,
3658 "total_emitted_vertices_ptr");
3660 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3661 bld
->emitted_prims_vec_ptr
);
3662 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3663 bld
->emitted_vertices_vec_ptr
);
3664 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3665 bld
->total_emitted_vertices_vec_ptr
);
3668 if (DEBUG_EXECUTION
) {
3669 lp_build_printf(gallivm
, "\n");
3670 emit_dump_file(bld
, TGSI_FILE_CONSTANT
);
3672 emit_dump_file(bld
, TGSI_FILE_INPUT
);
3676 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
3678 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3679 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3681 if (DEBUG_EXECUTION
) {
3684 emit_dump_file(bld
, TGSI_FILE_TEMPORARY
);
3686 emit_dump_file(bld
, TGSI_FILE_OUTPUT
);
3687 lp_build_printf(bld_base
->base
.gallivm
, "\n");
3690 /* If we have indirect addressing in outputs we need to copy our alloca array
3691 * to the outputs slots specified by the caller */
3692 if (bld
->gs_iface
) {
3693 LLVMValueRef total_emitted_vertices_vec
;
3694 LLVMValueRef emitted_prims_vec
;
3695 /* implicit end_primitives, needed in case there are any unflushed
3696 vertices in the cache. Note must not call end_primitive here
3697 since the exec_mask is not valid at this point. */
3698 end_primitive_masked(bld_base
, lp_build_mask_value(bld
->mask
));
3700 total_emitted_vertices_vec
=
3701 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3703 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3705 bld
->gs_iface
->gs_epilogue(bld
->gs_iface
,
3707 total_emitted_vertices_vec
,
3710 gather_outputs(bld
);
3715 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
3716 const struct tgsi_token
*tokens
,
3717 struct lp_type type
,
3718 struct lp_build_mask_context
*mask
,
3719 LLVMValueRef consts_ptr
,
3720 LLVMValueRef const_sizes_ptr
,
3721 const struct lp_bld_tgsi_system_values
*system_values
,
3722 const LLVMValueRef (*inputs
)[TGSI_NUM_CHANNELS
],
3723 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
],
3724 struct lp_build_sampler_soa
*sampler
,
3725 const struct tgsi_shader_info
*info
,
3726 const struct lp_build_tgsi_gs_iface
*gs_iface
)
3728 struct lp_build_tgsi_soa_context bld
;
3730 struct lp_type res_type
;
3732 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
3733 memset(&res_type
, 0, sizeof res_type
);
3734 res_type
.width
= type
.width
;
3735 res_type
.length
= type
.length
;
3738 /* Setup build context */
3739 memset(&bld
, 0, sizeof bld
);
3740 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
3741 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
3742 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
3743 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
3745 bld
.inputs
= inputs
;
3746 bld
.outputs
= outputs
;
3747 bld
.consts_ptr
= consts_ptr
;
3748 bld
.const_sizes_ptr
= const_sizes_ptr
;
3749 bld
.sampler
= sampler
;
3750 bld
.bld_base
.info
= info
;
3751 bld
.indirect_files
= info
->indirect_files
;
3754 * If the number of temporaries is rather large then we just
3755 * allocate them as an array right from the start and treat
3756 * like indirect temporaries.
3758 if (info
->file_max
[TGSI_FILE_TEMPORARY
] >= LP_MAX_INLINED_TEMPS
) {
3759 bld
.indirect_files
|= (1 << TGSI_FILE_TEMPORARY
);
3762 * For performance reason immediates are always backed in a static
3763 * array, but if their number is too great, we have to use just
3764 * a dynamically allocated array.
3766 bld
.use_immediates_array
=
3767 (info
->file_max
[TGSI_FILE_IMMEDIATE
] >= LP_MAX_INLINED_IMMEDIATES
);
3768 if (bld
.use_immediates_array
) {
3769 bld
.indirect_files
|= (1 << TGSI_FILE_IMMEDIATE
);
3773 bld
.bld_base
.soa
= TRUE
;
3774 bld
.bld_base
.emit_debug
= emit_debug
;
3775 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
3776 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
3777 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
3778 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
3779 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
3780 bld
.bld_base
.emit_store
= emit_store
;
3782 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
3783 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
3785 bld
.bld_base
.emit_prologue
= emit_prologue
;
3786 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
3788 /* Set opcode actions */
3789 lp_set_default_actions_cpu(&bld
.bld_base
);
3791 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
3792 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
3793 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
3794 bld
.bld_base
.op_actions
[TGSI_OPCODE_BREAKC
].emit
= breakc_emit
;
3795 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
3796 bld
.bld_base
.op_actions
[TGSI_OPCODE_CASE
].emit
= case_emit
;
3797 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
3798 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
3799 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
3800 bld
.bld_base
.op_actions
[TGSI_OPCODE_DEFAULT
].emit
= default_emit
;
3801 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
3802 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
3803 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
3804 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
3805 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSWITCH
].emit
= endswitch_emit
;
3806 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
3807 bld
.bld_base
.op_actions
[TGSI_OPCODE_UIF
].emit
= uif_emit
;
3808 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL_IF
].emit
= kill_if_emit
;
3809 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL
].emit
= kill_emit
;
3810 bld
.bld_base
.op_actions
[TGSI_OPCODE_NRM
].emit
= nrm_emit
;
3811 bld
.bld_base
.op_actions
[TGSI_OPCODE_NRM4
].emit
= nrm_emit
;
3812 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
3813 bld
.bld_base
.op_actions
[TGSI_OPCODE_SWITCH
].emit
= switch_emit
;
3814 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
3815 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
3816 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
3817 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
3818 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
3819 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
3820 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
3821 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX2
].emit
= tex2_emit
;
3822 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB2
].emit
= txb2_emit
;
3823 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL2
].emit
= txl2_emit
;
3824 /* DX10 sampling ops */
3825 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE
].emit
= sample_emit
;
3826 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_B
].emit
= sample_b_emit
;
3827 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C
].emit
= sample_c_emit
;
3828 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C_LZ
].emit
= sample_c_lz_emit
;
3829 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_D
].emit
= sample_d_emit
;
3830 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I
].emit
= sample_i_emit
;
3831 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_L
].emit
= sample_l_emit
;
3832 bld
.bld_base
.op_actions
[TGSI_OPCODE_SVIEWINFO
].emit
= sviewinfo_emit
;
3835 /* There's no specific value for this because it should always
3836 * be set, but apps using ext_geometry_shader4 quite often
3837 * were forgetting so we're using MAX_VERTEX_VARYING from
3838 * that spec even though we could debug_assert if it's not
3839 * set, but that's a lot uglier. */
3840 uint max_output_vertices
= 32;
3842 /* inputs are always indirect with gs */
3843 bld
.indirect_files
|= (1 << TGSI_FILE_INPUT
);
3844 bld
.gs_iface
= gs_iface
;
3845 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_gs_input
;
3846 bld
.bld_base
.op_actions
[TGSI_OPCODE_EMIT
].emit
= emit_vertex
;
3847 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDPRIM
].emit
= end_primitive
;
3849 for (i
= 0; i
< info
->num_properties
; ++i
) {
3850 if (info
->properties
[i
].name
==
3851 TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
) {
3852 max_output_vertices
= info
->properties
[i
].data
[0];
3855 bld
.max_output_vertices_vec
=
3856 lp_build_const_int_vec(gallivm
, bld
.bld_base
.int_bld
.type
,
3857 max_output_vertices
);
3860 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.int_bld
);
3862 bld
.system_values
= *system_values
;
3864 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
3867 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
3868 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
3869 debug_printf("11111111111111111111111111111 \n");
3870 tgsi_dump(tokens
, 0);
3871 lp_debug_dump_value(function
);
3872 debug_printf("2222222222222222222222222222 \n");
3876 LLVMModuleRef module
= LLVMGetGlobalParent(
3877 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
3878 LLVMDumpModule(module
);
3881 lp_exec_mask_fini(&bld
.exec_mask
);