1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "tgsi/tgsi_strings.h"
51 #include "lp_bld_tgsi_action.h"
52 #include "lp_bld_type.h"
53 #include "lp_bld_const.h"
54 #include "lp_bld_arit.h"
55 #include "lp_bld_bitarit.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_init.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_swizzle.h"
60 #include "lp_bld_flow.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_tgsi.h"
63 #include "lp_bld_limits.h"
64 #include "lp_bld_debug.h"
65 #include "lp_bld_printf.h"
66 #include "lp_bld_sample.h"
67 #include "lp_bld_struct.h"
69 /* SM 4.0 says that subroutines can nest 32 deep and
70 * we need one more for our main function */
71 #define LP_MAX_NUM_FUNCS 33
73 #define DUMP_GS_EMITS 0
76 * If non-zero, the generated LLVM IR will print intermediate results on every TGSI
80 * - take execution masks in consideration
81 * - debug control-flow instructions
83 #define DEBUG_EXECUTION 0
87 * Emit code to print a register value.
90 emit_dump_reg(struct gallivm_state
*gallivm
,
98 util_snprintf(buf
, sizeof buf
, " %s[%u].%c = ",
100 index
, "xyzw"[chan
]);
102 lp_build_print_value(gallivm
, buf
, value
);
106 * Return the context for the current function.
107 * (always 'main', if shader doesn't do any function calls)
109 static inline struct function_ctx
*
110 func_ctx(struct lp_exec_mask
*mask
)
112 assert(mask
->function_stack_size
> 0);
113 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
114 return &mask
->function_stack
[mask
->function_stack_size
- 1];
118 * Returns true if we're in a loop.
119 * It's global, meaning that it returns true even if there's
120 * no loop inside the current function, but we were inside
121 * a loop inside another function, from which this one was called.
123 static inline boolean
124 mask_has_loop(struct lp_exec_mask
*mask
)
127 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
128 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
129 if (ctx
->loop_stack_size
> 0)
136 * Returns true if we're inside a switch statement.
137 * It's global, meaning that it returns true even if there's
138 * no switch in the current function, but we were inside
139 * a switch inside another function, from which this one was called.
141 static inline boolean
142 mask_has_switch(struct lp_exec_mask
*mask
)
145 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
146 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
147 if (ctx
->switch_stack_size
> 0)
154 * Returns true if we're inside a conditional.
155 * It's global, meaning that it returns true even if there's
156 * no conditional in the current function, but we were inside
157 * a conditional inside another function, from which this one was called.
159 static inline boolean
160 mask_has_cond(struct lp_exec_mask
*mask
)
163 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
164 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
165 if (ctx
->cond_stack_size
> 0)
173 * Initialize a function context at the specified index.
176 lp_exec_mask_function_init(struct lp_exec_mask
*mask
, int function_idx
)
178 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
179 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
180 struct function_ctx
*ctx
= &mask
->function_stack
[function_idx
];
182 ctx
->cond_stack_size
= 0;
183 ctx
->loop_stack_size
= 0;
184 ctx
->switch_stack_size
= 0;
186 if (function_idx
== 0) {
187 ctx
->ret_mask
= mask
->ret_mask
;
190 ctx
->loop_limiter
= lp_build_alloca(mask
->bld
->gallivm
,
191 int_type
, "looplimiter");
194 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
198 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
201 mask
->has_mask
= FALSE
;
202 mask
->ret_in_main
= FALSE
;
203 /* For the main function */
204 mask
->function_stack_size
= 1;
206 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
207 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
=
208 mask
->cond_mask
= mask
->switch_mask
=
209 LLVMConstAllOnes(mask
->int_vec_type
);
211 mask
->function_stack
= CALLOC(LP_MAX_NUM_FUNCS
,
212 sizeof(mask
->function_stack
[0]));
213 lp_exec_mask_function_init(mask
, 0);
217 lp_exec_mask_fini(struct lp_exec_mask
*mask
)
219 FREE(mask
->function_stack
);
222 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
224 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
225 boolean has_loop_mask
= mask_has_loop(mask
);
226 boolean has_cond_mask
= mask_has_cond(mask
);
227 boolean has_switch_mask
= mask_has_switch(mask
);
228 boolean has_ret_mask
= mask
->function_stack_size
> 1 ||
232 /*for loops we need to update the entire mask at runtime */
234 assert(mask
->break_mask
);
235 tmp
= LLVMBuildAnd(builder
,
239 mask
->exec_mask
= LLVMBuildAnd(builder
,
244 mask
->exec_mask
= mask
->cond_mask
;
246 if (has_switch_mask
) {
247 mask
->exec_mask
= LLVMBuildAnd(builder
,
254 mask
->exec_mask
= LLVMBuildAnd(builder
,
260 mask
->has_mask
= (has_cond_mask
||
266 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
269 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
270 struct function_ctx
*ctx
= func_ctx(mask
);
272 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
) {
273 ctx
->cond_stack_size
++;
276 if (ctx
->cond_stack_size
== 0 && mask
->function_stack_size
== 1) {
277 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
279 ctx
->cond_stack
[ctx
->cond_stack_size
++] = mask
->cond_mask
;
280 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
281 mask
->cond_mask
= LLVMBuildAnd(builder
,
285 lp_exec_mask_update(mask
);
288 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
290 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
291 struct function_ctx
*ctx
= func_ctx(mask
);
292 LLVMValueRef prev_mask
;
293 LLVMValueRef inv_mask
;
295 assert(ctx
->cond_stack_size
);
296 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
298 prev_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
- 1];
299 if (ctx
->cond_stack_size
== 1 && mask
->function_stack_size
== 1) {
300 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
303 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
305 mask
->cond_mask
= LLVMBuildAnd(builder
,
308 lp_exec_mask_update(mask
);
311 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
313 struct function_ctx
*ctx
= func_ctx(mask
);
314 assert(ctx
->cond_stack_size
);
315 --ctx
->cond_stack_size
;
316 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
318 mask
->cond_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
];
319 lp_exec_mask_update(mask
);
322 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
324 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
325 struct function_ctx
*ctx
= func_ctx(mask
);
327 if (ctx
->loop_stack_size
>= LP_MAX_TGSI_NESTING
) {
328 ++ctx
->loop_stack_size
;
332 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
334 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_LOOP
;
336 ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
= ctx
->loop_block
;
337 ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
338 ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
= mask
->break_mask
;
339 ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
= ctx
->break_var
;
340 ++ctx
->loop_stack_size
;
342 ctx
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
343 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
345 ctx
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
347 LLVMBuildBr(builder
, ctx
->loop_block
);
348 LLVMPositionBuilderAtEnd(builder
, ctx
->loop_block
);
350 mask
->break_mask
= LLVMBuildLoad(builder
, ctx
->break_var
, "");
352 lp_exec_mask_update(mask
);
355 static void lp_exec_break(struct lp_exec_mask
*mask
,
356 struct lp_build_tgsi_context
* bld_base
)
358 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
359 struct function_ctx
*ctx
= func_ctx(mask
);
361 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
362 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
366 mask
->break_mask
= LLVMBuildAnd(builder
,
368 exec_mask
, "break_full");
371 unsigned opcode
= bld_base
->instructions
[bld_base
->pc
+ 1].Instruction
.Opcode
;
372 boolean break_always
= (opcode
== TGSI_OPCODE_ENDSWITCH
||
373 opcode
== TGSI_OPCODE_CASE
);
376 if (ctx
->switch_in_default
) {
378 * stop default execution but only if this is an unconditional switch.
379 * (The condition here is not perfect since dead code after break is
380 * allowed but should be sufficient since false negatives are just
381 * unoptimized - so we don't have to pre-evaluate that).
383 if(break_always
&& ctx
->switch_pc
) {
384 bld_base
->pc
= ctx
->switch_pc
;
390 mask
->switch_mask
= LLVMConstNull(mask
->bld
->int_vec_type
);
393 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
396 mask
->switch_mask
= LLVMBuildAnd(builder
,
398 exec_mask
, "break_switch");
402 lp_exec_mask_update(mask
);
405 static void lp_exec_continue(struct lp_exec_mask
*mask
)
407 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
408 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
412 mask
->cont_mask
= LLVMBuildAnd(builder
,
416 lp_exec_mask_update(mask
);
420 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
421 struct lp_exec_mask
*mask
)
423 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
424 struct function_ctx
*ctx
= func_ctx(mask
);
425 LLVMBasicBlockRef endloop
;
426 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
427 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
428 mask
->bld
->type
.width
*
429 mask
->bld
->type
.length
);
430 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
432 assert(mask
->break_mask
);
435 assert(ctx
->loop_stack_size
);
436 if (ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
437 --ctx
->loop_stack_size
;
442 * Restore the cont_mask, but don't pop
444 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
- 1].cont_mask
;
445 lp_exec_mask_update(mask
);
448 * Unlike the continue mask, the break_mask must be preserved across loop
451 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
453 /* Decrement the loop limiter */
454 limiter
= LLVMBuildLoad(builder
, ctx
->loop_limiter
, "");
456 limiter
= LLVMBuildSub(
459 LLVMConstInt(int_type
, 1, false),
462 LLVMBuildStore(builder
, limiter
, ctx
->loop_limiter
);
464 /* i1cond = (mask != 0) */
465 i1cond
= LLVMBuildICmp(
468 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
469 LLVMConstNull(reg_type
), "i1cond");
471 /* i2cond = (looplimiter > 0) */
472 i2cond
= LLVMBuildICmp(
476 LLVMConstNull(int_type
), "i2cond");
478 /* if( i1cond && i2cond ) */
479 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
481 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
483 LLVMBuildCondBr(builder
,
484 icond
, ctx
->loop_block
, endloop
);
486 LLVMPositionBuilderAtEnd(builder
, endloop
);
488 assert(ctx
->loop_stack_size
);
489 --ctx
->loop_stack_size
;
490 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
;
491 mask
->break_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
;
492 ctx
->loop_block
= ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
;
493 ctx
->break_var
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
;
494 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+
495 ctx
->switch_stack_size
];
497 lp_exec_mask_update(mask
);
500 static void lp_exec_switch(struct lp_exec_mask
*mask
,
501 LLVMValueRef switchval
)
503 struct function_ctx
*ctx
= func_ctx(mask
);
505 if (ctx
->switch_stack_size
>= LP_MAX_TGSI_NESTING
||
506 ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
507 ctx
->switch_stack_size
++;
511 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
513 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_SWITCH
;
515 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
= mask
->switch_mask
;
516 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
= ctx
->switch_val
;
517 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
= ctx
->switch_mask_default
;
518 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
= ctx
->switch_in_default
;
519 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
= ctx
->switch_pc
;
520 ctx
->switch_stack_size
++;
522 mask
->switch_mask
= LLVMConstNull(mask
->int_vec_type
);
523 ctx
->switch_val
= switchval
;
524 ctx
->switch_mask_default
= LLVMConstNull(mask
->int_vec_type
);
525 ctx
->switch_in_default
= false;
528 lp_exec_mask_update(mask
);
531 static void lp_exec_endswitch(struct lp_exec_mask
*mask
,
532 struct lp_build_tgsi_context
* bld_base
)
534 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
535 struct function_ctx
*ctx
= func_ctx(mask
);
537 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
538 ctx
->switch_stack_size
--;
542 /* check if there's deferred default if so do it now */
543 if (ctx
->switch_pc
&& !ctx
->switch_in_default
) {
544 LLVMValueRef prevmask
, defaultmask
;
546 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
547 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
548 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
549 ctx
->switch_in_default
= true;
551 lp_exec_mask_update(mask
);
553 assert(bld_base
->instructions
[ctx
->switch_pc
- 1].Instruction
.Opcode
==
554 TGSI_OPCODE_DEFAULT
);
556 tmp_pc
= bld_base
->pc
;
557 bld_base
->pc
= ctx
->switch_pc
;
559 * re-purpose switch_pc to point to here again, since we stop execution of
560 * the deferred default after next break.
562 ctx
->switch_pc
= tmp_pc
- 1;
567 else if (ctx
->switch_pc
&& ctx
->switch_in_default
) {
568 assert(bld_base
->pc
== ctx
->switch_pc
+ 1);
571 ctx
->switch_stack_size
--;
572 mask
->switch_mask
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
;
573 ctx
->switch_val
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
;
574 ctx
->switch_mask_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
;
575 ctx
->switch_in_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
;
576 ctx
->switch_pc
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
;
578 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
];
580 lp_exec_mask_update(mask
);
583 static void lp_exec_case(struct lp_exec_mask
*mask
,
584 LLVMValueRef caseval
)
586 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
587 struct function_ctx
*ctx
= func_ctx(mask
);
589 LLVMValueRef casemask
, prevmask
;
591 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
595 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
596 if (!ctx
->switch_in_default
) {
597 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
598 casemask
= lp_build_cmp(mask
->bld
, PIPE_FUNC_EQUAL
, caseval
, ctx
->switch_val
);
599 ctx
->switch_mask_default
= LLVMBuildOr(builder
, casemask
,
600 ctx
->switch_mask_default
, "sw_default_mask");
601 casemask
= LLVMBuildOr(builder
, casemask
, mask
->switch_mask
, "");
602 mask
->switch_mask
= LLVMBuildAnd(builder
, casemask
, prevmask
, "sw_mask");
604 lp_exec_mask_update(mask
);
609 * Analyse default statement in a switch.
610 * \return true if default is last statement, false otherwise
611 * \param default_pc_start contains pc of instruction to jump to
612 * if default wasn't last but there's no
613 * fallthrough into default.
615 static boolean
default_analyse_is_last(struct lp_exec_mask
*mask
,
616 struct lp_build_tgsi_context
* bld_base
,
617 int *default_pc_start
)
619 unsigned pc
= bld_base
->pc
;
620 struct function_ctx
*ctx
= func_ctx(mask
);
621 int curr_switch_stack
= ctx
->switch_stack_size
;
623 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
627 /* skip over case statements which are together with default */
628 while (bld_base
->instructions
[pc
].Instruction
.Opcode
== TGSI_OPCODE_CASE
) {
632 while (pc
!= ~0u && pc
< bld_base
->num_instructions
) {
633 unsigned opcode
= bld_base
->instructions
[pc
].Instruction
.Opcode
;
635 case TGSI_OPCODE_CASE
:
636 if (curr_switch_stack
== ctx
->switch_stack_size
) {
637 *default_pc_start
= pc
- 1;
641 case TGSI_OPCODE_SWITCH
:
644 case TGSI_OPCODE_ENDSWITCH
:
645 if (curr_switch_stack
== ctx
->switch_stack_size
) {
646 *default_pc_start
= pc
- 1;
654 /* should never arrive here */
659 static void lp_exec_default(struct lp_exec_mask
*mask
,
660 struct lp_build_tgsi_context
* bld_base
)
662 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
663 struct function_ctx
*ctx
= func_ctx(mask
);
666 boolean default_is_last
;
668 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
673 * This is a messy opcode, because it may not be always at the end and
674 * there can be fallthrough in and out of it.
677 default_is_last
= default_analyse_is_last(mask
, bld_base
, &default_exec_pc
);
679 * If it is last statement in switch (note that case statements appearing
680 * "at the same time" as default don't change that) everything is just fine,
681 * update switch mask and go on. This means we can handle default with
682 * fallthrough INTO it without overhead, if it is last.
684 if (default_is_last
) {
685 LLVMValueRef prevmask
, defaultmask
;
686 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
687 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
688 defaultmask
= LLVMBuildOr(builder
, defaultmask
, mask
->switch_mask
, "");
689 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
690 ctx
->switch_in_default
= true;
692 lp_exec_mask_update(mask
);
696 * Technically, "case" immediately before default isn't really a
697 * fallthrough, however we still have to count them as such as we
698 * already have updated the masks.
699 * If that happens in practice could add a switch optimizer pass
700 * which just gets rid of all case statements appearing together with
701 * default (or could do switch analysis at switch start time instead).
703 unsigned opcode
= bld_base
->instructions
[bld_base
->pc
- 1].Instruction
.Opcode
;
704 boolean ft_into
= (opcode
!= TGSI_OPCODE_BRK
&&
705 opcode
!= TGSI_OPCODE_SWITCH
);
707 * If it is not last statement and there was no fallthrough into it,
708 * we record the PC and continue execution at next case (again, those
709 * case encountered at the same time don't count). At endswitch
710 * time, we update switchmask, and go back executing the code we skipped
711 * until the next break (possibly re-executing some code with changed mask
712 * if there was a fallthrough out of default).
713 * Finally, if it is not last statement and there was a fallthrough into it,
714 * do the same as with the former case, except instead of skipping the code
715 * just execute it without updating the mask, then go back and re-execute.
717 ctx
->switch_pc
= bld_base
->pc
;
719 bld_base
->pc
= default_exec_pc
;
725 /* stores val into an address pointed to by dst_ptr.
726 * mask->exec_mask is used to figure out which bits of val
727 * should be stored into the address
728 * (0 means don't store this bit, 1 means do store).
730 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
731 struct lp_build_context
*bld_store
,
733 LLVMValueRef dst_ptr
)
735 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
736 LLVMValueRef exec_mask
= mask
->has_mask
? mask
->exec_mask
: NULL
;
738 assert(lp_check_value(bld_store
->type
, val
));
739 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr
)) == LLVMPointerTypeKind
);
740 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr
)) == LLVMTypeOf(val
));
743 LLVMValueRef res
, dst
;
745 dst
= LLVMBuildLoad(builder
, dst_ptr
, "");
746 res
= lp_build_select(bld_store
, exec_mask
, val
, dst
);
747 LLVMBuildStore(builder
, res
, dst_ptr
);
749 LLVMBuildStore(builder
, val
, dst_ptr
);
752 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
756 if (mask
->function_stack_size
>= LP_MAX_NUM_FUNCS
) {
760 lp_exec_mask_function_init(mask
, mask
->function_stack_size
);
761 mask
->function_stack
[mask
->function_stack_size
].pc
= *pc
;
762 mask
->function_stack
[mask
->function_stack_size
].ret_mask
= mask
->ret_mask
;
763 mask
->function_stack_size
++;
767 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
769 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
770 struct function_ctx
*ctx
= func_ctx(mask
);
771 LLVMValueRef exec_mask
;
773 if (ctx
->cond_stack_size
== 0 &&
774 ctx
->loop_stack_size
== 0 &&
775 ctx
->switch_stack_size
== 0 &&
776 mask
->function_stack_size
== 1) {
777 /* returning from main() */
782 if (mask
->function_stack_size
== 1) {
784 * This requires special handling since we need to ensure
785 * we don't drop the mask even if we have no call stack
786 * (e.g. after a ret in a if clause after the endif)
788 mask
->ret_in_main
= TRUE
;
791 exec_mask
= LLVMBuildNot(builder
,
795 mask
->ret_mask
= LLVMBuildAnd(builder
,
797 exec_mask
, "ret_full");
799 lp_exec_mask_update(mask
);
/* Nothing to do at subroutine entry; mask bookkeeping happens at call/ret. */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
806 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
808 struct function_ctx
*ctx
;
810 assert(mask
->function_stack_size
> 1);
811 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
813 ctx
= func_ctx(mask
);
814 mask
->function_stack_size
--;
817 mask
->ret_mask
= ctx
->ret_mask
;
819 lp_exec_mask_update(mask
);
824 get_file_ptr(struct lp_build_tgsi_soa_context
*bld
,
829 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
830 LLVMValueRef (*array_of_vars
)[TGSI_NUM_CHANNELS
];
831 LLVMValueRef var_of_array
;
834 case TGSI_FILE_TEMPORARY
:
835 array_of_vars
= bld
->temps
;
836 var_of_array
= bld
->temps_array
;
838 case TGSI_FILE_OUTPUT
:
839 array_of_vars
= bld
->outputs
;
840 var_of_array
= bld
->outputs_array
;
849 if (bld
->indirect_files
& (1 << file
)) {
850 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
851 return LLVMBuildGEP(builder
, var_of_array
, &lindex
, 1, "");
854 assert(index
<= bld
->bld_base
.info
->file_max
[file
]);
855 return array_of_vars
[index
][chan
];
861 * Return pointer to a temporary register channel (src or dest).
862 * Note that indirect addressing cannot be handled here.
863 * \param index which temporary register
864 * \param chan which channel of the temp register.
867 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
871 return get_file_ptr(bld
, TGSI_FILE_TEMPORARY
, index
, chan
);
875 * Return pointer to a output register channel (src or dest).
876 * Note that indirect addressing cannot be handled here.
877 * \param index which output register
878 * \param chan which channel of the output register.
881 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
885 return get_file_ptr(bld
, TGSI_FILE_OUTPUT
, index
, chan
);
889 * If we have indirect addressing in outputs copy our alloca array
890 * to the outputs slots specified by the caller to make sure
891 * our outputs are delivered consistently via the same interface.
894 gather_outputs(struct lp_build_tgsi_soa_context
* bld
)
896 if ((bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
897 unsigned index
, chan
;
898 assert(bld
->bld_base
.info
->num_outputs
<=
899 bld
->bld_base
.info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
900 for (index
= 0; index
< bld
->bld_base
.info
->num_outputs
; ++index
) {
901 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
902 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
910 * XXX the lp_build_gather() function should be capable of doing this
911 * with a little work.
914 build_gather(struct lp_build_tgsi_context
*bld_base
,
915 LLVMValueRef base_ptr
,
916 LLVMValueRef indexes
,
917 LLVMValueRef overflow_mask
,
918 LLVMValueRef indexes2
)
920 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
921 LLVMBuilderRef builder
= gallivm
->builder
;
922 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
923 struct lp_build_context
*bld
= &bld_base
->base
;
928 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
932 * overflow_mask is a vector telling us which channels
933 * in the vector overflowed. We use the overflow behavior for
934 * constant buffers which is defined as:
935 * Out of bounds access to constant buffer returns 0 in all
936 * components. Out of bounds behavior is always with respect
937 * to the size of the buffer bound at that slot.
942 * We avoid per-element control flow here (also due to llvm going crazy,
943 * though I suspect it's better anyway since overflow is likely rare).
944 * Note that since we still fetch from buffers even if num_elements was
945 * zero (in this case we'll fetch from index zero) the jit func callers
946 * MUST provide valid fake constant buffers of size 4x32 (the values do
947 * not matter), otherwise we'd still need (not per element though)
950 indexes
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes
);
952 indexes2
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes2
);
956 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
958 for (i
= 0; i
< bld
->type
.length
* (indexes2
? 2 : 1); i
++) {
961 LLVMValueRef scalar_ptr
, scalar
;
963 di
= lp_build_const_int32(bld
->gallivm
, i
);
965 si
= lp_build_const_int32(bld
->gallivm
, i
>> 1);
969 if (indexes2
&& (i
& 1)) {
970 index
= LLVMBuildExtractElement(builder
,
973 index
= LLVMBuildExtractElement(builder
,
976 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
977 &index
, 1, "gather_ptr");
978 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
980 res
= LLVMBuildInsertElement(builder
, res
, scalar
, di
, "");
985 res
= LLVMBuildBitCast(builder
, res
, bld_base
->dbl_bld
.vec_type
, "");
986 overflow_mask
= LLVMBuildSExt(builder
, overflow_mask
,
987 bld_base
->dbl_bld
.int_vec_type
, "");
988 res
= lp_build_select(&bld_base
->dbl_bld
, overflow_mask
,
989 bld_base
->dbl_bld
.zero
, res
);
991 res
= lp_build_select(bld
, overflow_mask
, bld
->zero
, res
);
999 * Scatter/store vector.
1002 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
1003 LLVMValueRef base_ptr
,
1004 LLVMValueRef indexes
,
1005 LLVMValueRef values
,
1006 struct lp_exec_mask
*mask
)
1008 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1009 LLVMBuilderRef builder
= gallivm
->builder
;
1011 LLVMValueRef pred
= mask
->has_mask
? mask
->exec_mask
: NULL
;
1014 * Loop over elements of index_vec, store scalar value.
1016 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1017 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1018 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
1019 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
1020 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
1021 LLVMValueRef scalar_pred
= pred
?
1022 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
1025 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
1026 ii
, val
, index
, scalar_ptr
);
1029 LLVMValueRef real_val
, dst_val
;
1030 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1031 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
1032 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
1035 LLVMBuildStore(builder
, val
, scalar_ptr
);
1042 * Read the current value of the ADDR register, convert the floats to
1043 * ints, add the base index and return the vector of offsets.
1044 * The offsets will be used to index into the constant buffer or
1045 * temporary register file.
1048 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
1049 unsigned reg_file
, unsigned reg_index
,
1050 const struct tgsi_ind_register
*indirect_reg
)
1052 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1053 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
1054 /* always use X component of address register */
1055 unsigned swizzle
= indirect_reg
->Swizzle
;
1058 LLVMValueRef max_index
;
1061 assert(bld
->indirect_files
& (1 << reg_file
));
1063 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
1065 assert(swizzle
< 4);
1066 switch (indirect_reg
->File
) {
1067 case TGSI_FILE_ADDRESS
:
1068 rel
= LLVMBuildLoad(builder
,
1069 bld
->addr
[indirect_reg
->Index
][swizzle
],
1071 /* ADDR LLVM values already have LLVM integer type. */
1073 case TGSI_FILE_TEMPORARY
:
1074 rel
= lp_get_temp_ptr_soa(bld
, indirect_reg
->Index
, swizzle
);
1075 rel
= LLVMBuildLoad(builder
, rel
, "load temp reg");
1076 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1077 * value actually stored is expected to be an integer */
1078 rel
= LLVMBuildBitCast(builder
, rel
, uint_bld
->vec_type
, "");
1082 rel
= uint_bld
->zero
;
1085 index
= lp_build_add(uint_bld
, base
, rel
);
1088 * emit_fetch_constant handles constant buffer overflow so this code
1089 * is pointless for them.
1090 * Furthermore the D3D10 spec in section 6.5 says:
1091 * If the constant buffer bound to a slot is larger than the size
1092 * declared in the shader for that slot, implementations are allowed
1093 * to return incorrect data (not necessarily 0) for indices that are
1094 * larger than the declared size but smaller than the buffer size.
1096 if (reg_file
!= TGSI_FILE_CONSTANT
) {
1097 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
1099 bld
->bld_base
.info
->file_max
[reg_file
]);
1101 assert(!uint_bld
->type
.sign
);
1102 index
= lp_build_min(uint_bld
, index
, max_index
);
1108 static struct lp_build_context
*
1109 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
1110 enum tgsi_opcode_type stype
)
1112 struct lp_build_context
*bld_fetch
;
1115 case TGSI_TYPE_FLOAT
:
1116 case TGSI_TYPE_UNTYPED
:
1117 bld_fetch
= &bld_base
->base
;
1119 case TGSI_TYPE_UNSIGNED
:
1120 bld_fetch
= &bld_base
->uint_bld
;
1122 case TGSI_TYPE_SIGNED
:
1123 bld_fetch
= &bld_base
->int_bld
;
1125 case TGSI_TYPE_DOUBLE
:
1126 bld_fetch
= &bld_base
->dbl_bld
;
1128 case TGSI_TYPE_UNSIGNED64
:
1129 bld_fetch
= &bld_base
->uint64_bld
;
1131 case TGSI_TYPE_SIGNED64
:
1132 bld_fetch
= &bld_base
->int64_bld
;
1134 case TGSI_TYPE_VOID
:
1144 get_soa_array_offsets(struct lp_build_context
*uint_bld
,
1145 LLVMValueRef indirect_index
,
1146 unsigned chan_index
,
1147 boolean need_perelement_offset
)
1149 struct gallivm_state
*gallivm
= uint_bld
->gallivm
;
1150 LLVMValueRef chan_vec
=
1151 lp_build_const_int_vec(uint_bld
->gallivm
, uint_bld
->type
, chan_index
);
1152 LLVMValueRef length_vec
=
1153 lp_build_const_int_vec(gallivm
, uint_bld
->type
, uint_bld
->type
.length
);
1154 LLVMValueRef index_vec
;
1156 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1157 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1158 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1159 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1161 if (need_perelement_offset
) {
1162 LLVMValueRef pixel_offsets
;
1164 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1165 pixel_offsets
= uint_bld
->undef
;
1166 for (i
= 0; i
< uint_bld
->type
.length
; i
++) {
1167 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1168 pixel_offsets
= LLVMBuildInsertElement(gallivm
->builder
, pixel_offsets
,
1171 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1177 emit_fetch_constant(
1178 struct lp_build_tgsi_context
* bld_base
,
1179 const struct tgsi_full_src_register
* reg
,
1180 enum tgsi_opcode_type stype
,
1183 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1184 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1185 LLVMBuilderRef builder
= gallivm
->builder
;
1186 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
1187 unsigned dimension
= 0;
1188 LLVMValueRef consts_ptr
;
1189 LLVMValueRef num_consts
;
1192 /* XXX: Handle fetching xyzw components as a vector */
1193 assert(swizzle
!= ~0u);
1195 if (reg
->Register
.Dimension
) {
1196 assert(!reg
->Dimension
.Indirect
);
1197 dimension
= reg
->Dimension
.Index
;
1198 assert(dimension
< LP_MAX_TGSI_CONST_BUFFERS
);
1201 consts_ptr
= bld
->consts
[dimension
];
1202 num_consts
= bld
->consts_sizes
[dimension
];
1204 if (reg
->Register
.Indirect
) {
1205 LLVMValueRef indirect_index
;
1206 LLVMValueRef swizzle_vec
=
1207 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
1208 LLVMValueRef index_vec
; /* index into the const buffer */
1209 LLVMValueRef overflow_mask
;
1210 LLVMValueRef index_vec2
= NULL
;
1212 indirect_index
= get_indirect_index(bld
,
1214 reg
->Register
.Index
,
1217 /* All fetches are from the same constant buffer, so
1218 * we need to propagate the size to a vector to do a
1219 * vector comparison */
1220 num_consts
= lp_build_broadcast_scalar(uint_bld
, num_consts
);
1221 /* Construct a boolean vector telling us which channels
1222 * overflow the bound constant buffer */
1223 overflow_mask
= lp_build_compare(gallivm
, uint_bld
->type
, PIPE_FUNC_GEQUAL
,
1224 indirect_index
, num_consts
);
1226 /* index_vec = indirect_index * 4 + swizzle */
1227 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1228 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
1230 if (tgsi_type_is_64bit(stype
)) {
1231 LLVMValueRef swizzle_vec2
;
1232 swizzle_vec2
= lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
+ 1);
1233 index_vec2
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1234 index_vec2
= lp_build_add(uint_bld
, index_vec2
, swizzle_vec2
);
1236 /* Gather values from the constant buffer */
1237 res
= build_gather(bld_base
, consts_ptr
, index_vec
, overflow_mask
, index_vec2
);
1240 LLVMValueRef index
; /* index into the const buffer */
1241 LLVMValueRef scalar
, scalar_ptr
;
1242 struct lp_build_context
*bld_broad
= &bld_base
->base
;
1243 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1245 scalar_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1247 if (stype
== TGSI_TYPE_DOUBLE
) {
1248 LLVMTypeRef dptr_type
= LLVMPointerType(LLVMDoubleTypeInContext(gallivm
->context
), 0);
1249 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, dptr_type
, "");
1250 bld_broad
= &bld_base
->dbl_bld
;
1251 } else if (stype
== TGSI_TYPE_UNSIGNED64
) {
1252 LLVMTypeRef u64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1253 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, u64ptr_type
, "");
1254 bld_broad
= &bld_base
->uint64_bld
;
1255 } else if (stype
== TGSI_TYPE_SIGNED64
) {
1256 LLVMTypeRef i64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1257 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, i64ptr_type
, "");
1258 bld_broad
= &bld_base
->int64_bld
;
1260 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1261 res
= lp_build_broadcast_scalar(bld_broad
, scalar
);
1264 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| stype
== TGSI_TYPE_DOUBLE
|| stype
== TGSI_TYPE_SIGNED64
|| stype
== TGSI_TYPE_UNSIGNED64
) {
1265 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1266 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1273 * Fetch 64-bit values from two separate channels.
1274 * 64-bit values are stored split across two channels, like xy and zw.
1275 * This function creates a set of 16 floats,
1276 * extracts the values from the two channels,
1277 * puts them in the correct place, then casts to 8 64-bits.
1281 struct lp_build_tgsi_context
* bld_base
,
1282 enum tgsi_opcode_type stype
,
1284 LLVMValueRef input2
)
1286 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1287 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1288 LLVMBuilderRef builder
= gallivm
->builder
;
1290 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1292 LLVMValueRef shuffles
[16];
1293 int len
= bld_base
->base
.type
.length
* 2;
1296 for (i
= 0; i
< bld_base
->base
.type
.length
* 2; i
+=2) {
1297 shuffles
[i
] = lp_build_const_int32(gallivm
, i
/ 2);
1298 shuffles
[i
+ 1] = lp_build_const_int32(gallivm
, i
/ 2 + bld_base
->base
.type
.length
);
1300 res
= LLVMBuildShuffleVector(builder
, input
, input2
, LLVMConstVector(shuffles
, len
), "");
1302 return LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1306 emit_fetch_immediate(
1307 struct lp_build_tgsi_context
* bld_base
,
1308 const struct tgsi_full_src_register
* reg
,
1309 enum tgsi_opcode_type stype
,
1312 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1313 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1314 LLVMBuilderRef builder
= gallivm
->builder
;
1315 LLVMValueRef res
= NULL
;
1317 if (bld
->use_immediates_array
|| reg
->Register
.Indirect
) {
1318 LLVMValueRef imms_array
;
1319 LLVMTypeRef fptr_type
;
1321 /* cast imms_array pointer to float* */
1322 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1323 imms_array
= LLVMBuildBitCast(builder
, bld
->imms_array
, fptr_type
, "");
1325 if (reg
->Register
.Indirect
) {
1326 LLVMValueRef indirect_index
;
1327 LLVMValueRef index_vec
; /* index into the immediate register array */
1328 LLVMValueRef index_vec2
= NULL
;
1329 indirect_index
= get_indirect_index(bld
,
1331 reg
->Register
.Index
,
1334 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1335 * immediates are stored as full vectors (FIXME??? - might be better
1336 * to store them the same as constants) but all elements are the same
1339 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1343 if (tgsi_type_is_64bit(stype
))
1344 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1348 /* Gather values from the immediate register array */
1349 res
= build_gather(bld_base
, imms_array
, index_vec
, NULL
, index_vec2
);
1351 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1352 reg
->Register
.Index
* 4 + swizzle
);
1353 LLVMValueRef imms_ptr
= LLVMBuildGEP(builder
,
1354 bld
->imms_array
, &lindex
, 1, "");
1355 res
= LLVMBuildLoad(builder
, imms_ptr
, "");
1357 if (tgsi_type_is_64bit(stype
)) {
1358 LLVMValueRef lindex1
;
1359 LLVMValueRef imms_ptr2
;
1362 lindex1
= lp_build_const_int32(gallivm
,
1363 reg
->Register
.Index
* 4 + swizzle
+ 1);
1364 imms_ptr2
= LLVMBuildGEP(builder
,
1365 bld
->imms_array
, &lindex1
, 1, "");
1366 res2
= LLVMBuildLoad(builder
, imms_ptr2
, "");
1367 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1372 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
1373 if (tgsi_type_is_64bit(stype
))
1374 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->immediates
[reg
->Register
.Index
][swizzle
+ 1]);
1377 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1378 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1379 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1386 struct lp_build_tgsi_context
* bld_base
,
1387 const struct tgsi_full_src_register
* reg
,
1388 enum tgsi_opcode_type stype
,
1391 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1392 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1393 LLVMBuilderRef builder
= gallivm
->builder
;
1396 if (reg
->Register
.Indirect
) {
1397 LLVMValueRef indirect_index
;
1398 LLVMValueRef index_vec
; /* index into the input reg array */
1399 LLVMValueRef index_vec2
= NULL
;
1400 LLVMValueRef inputs_array
;
1401 LLVMTypeRef fptr_type
;
1403 indirect_index
= get_indirect_index(bld
,
1405 reg
->Register
.Index
,
1408 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1412 if (tgsi_type_is_64bit(stype
)) {
1413 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1418 /* cast inputs_array pointer to float* */
1419 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1420 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
, fptr_type
, "");
1422 /* Gather values from the input register array */
1423 res
= build_gather(bld_base
, inputs_array
, index_vec
, NULL
, index_vec2
);
1425 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
1426 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1427 reg
->Register
.Index
* 4 + swizzle
);
1428 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
1429 bld
->inputs_array
, &lindex
, 1, "");
1431 res
= LLVMBuildLoad(builder
, input_ptr
, "");
1432 if (tgsi_type_is_64bit(stype
)) {
1433 LLVMValueRef lindex1
;
1434 LLVMValueRef input_ptr2
;
1437 lindex1
= lp_build_const_int32(gallivm
,
1438 reg
->Register
.Index
* 4 + swizzle
+ 1);
1439 input_ptr2
= LLVMBuildGEP(builder
,
1440 bld
->inputs_array
, &lindex1
, 1, "");
1441 res2
= LLVMBuildLoad(builder
, input_ptr2
, "");
1442 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1446 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
1447 if (tgsi_type_is_64bit(stype
))
1448 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->inputs
[reg
->Register
.Index
][swizzle
+ 1]);
1454 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1455 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1456 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1464 emit_fetch_gs_input(
1465 struct lp_build_tgsi_context
* bld_base
,
1466 const struct tgsi_full_src_register
* reg
,
1467 enum tgsi_opcode_type stype
,
1470 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1471 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1472 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1473 LLVMBuilderRef builder
= gallivm
->builder
;
1474 LLVMValueRef attrib_index
= NULL
;
1475 LLVMValueRef vertex_index
= NULL
;
1476 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
);
1479 if (info
->input_semantic_name
[reg
->Register
.Index
] == TGSI_SEMANTIC_PRIMID
) {
1480 /* This is really a system value not a regular input */
1481 assert(!reg
->Register
.Indirect
);
1482 assert(!reg
->Dimension
.Indirect
);
1483 res
= bld
->system_values
.prim_id
;
1484 if (stype
!= TGSI_TYPE_UNSIGNED
&& stype
!= TGSI_TYPE_SIGNED
) {
1485 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1490 if (reg
->Register
.Indirect
) {
1491 attrib_index
= get_indirect_index(bld
,
1493 reg
->Register
.Index
,
1496 attrib_index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
);
1499 if (reg
->Dimension
.Indirect
) {
1500 vertex_index
= get_indirect_index(bld
,
1502 reg
->Dimension
.Index
,
1505 vertex_index
= lp_build_const_int32(gallivm
, reg
->Dimension
.Index
);
1508 res
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1509 reg
->Dimension
.Indirect
,
1511 reg
->Register
.Indirect
,
1516 if (tgsi_type_is_64bit(stype
)) {
1517 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
+ 1);
1519 res2
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1520 reg
->Dimension
.Indirect
,
1522 reg
->Register
.Indirect
,
1526 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1527 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1528 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1529 } else if (stype
== TGSI_TYPE_SIGNED
) {
1530 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1537 emit_fetch_temporary(
1538 struct lp_build_tgsi_context
* bld_base
,
1539 const struct tgsi_full_src_register
* reg
,
1540 enum tgsi_opcode_type stype
,
1543 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1544 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1545 LLVMBuilderRef builder
= gallivm
->builder
;
1548 if (reg
->Register
.Indirect
) {
1549 LLVMValueRef indirect_index
;
1550 LLVMValueRef index_vec
, index_vec2
= NULL
; /* index into the temp reg array */
1551 LLVMValueRef temps_array
;
1552 LLVMTypeRef fptr_type
;
1554 indirect_index
= get_indirect_index(bld
,
1556 reg
->Register
.Index
,
1559 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1563 if (tgsi_type_is_64bit(stype
)) {
1564 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1570 /* cast temps_array pointer to float* */
1571 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1572 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1574 /* Gather values from the temporary register array */
1575 res
= build_gather(bld_base
, temps_array
, index_vec
, NULL
, index_vec2
);
1578 LLVMValueRef temp_ptr
;
1579 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
1580 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1582 if (tgsi_type_is_64bit(stype
)) {
1583 LLVMValueRef temp_ptr2
, res2
;
1585 temp_ptr2
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
+ 1);
1586 res2
= LLVMBuildLoad(builder
, temp_ptr2
, "");
1587 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1591 if (stype
== TGSI_TYPE_SIGNED
||
1592 stype
== TGSI_TYPE_UNSIGNED
||
1593 stype
== TGSI_TYPE_DOUBLE
||
1594 stype
== TGSI_TYPE_SIGNED64
||
1595 stype
== TGSI_TYPE_UNSIGNED64
) {
1596 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1597 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1604 emit_fetch_system_value(
1605 struct lp_build_tgsi_context
* bld_base
,
1606 const struct tgsi_full_src_register
* reg
,
1607 enum tgsi_opcode_type stype
,
1610 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1611 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1612 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1613 LLVMBuilderRef builder
= gallivm
->builder
;
1615 enum tgsi_opcode_type atype
; // Actual type of the value
1617 assert(!reg
->Register
.Indirect
);
1619 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
1620 case TGSI_SEMANTIC_INSTANCEID
:
1621 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
1622 atype
= TGSI_TYPE_UNSIGNED
;
1625 case TGSI_SEMANTIC_VERTEXID
:
1626 res
= bld
->system_values
.vertex_id
;
1627 atype
= TGSI_TYPE_UNSIGNED
;
1630 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
1631 res
= bld
->system_values
.vertex_id_nobase
;
1632 atype
= TGSI_TYPE_UNSIGNED
;
1635 case TGSI_SEMANTIC_BASEVERTEX
:
1636 res
= bld
->system_values
.basevertex
;
1637 atype
= TGSI_TYPE_UNSIGNED
;
1640 case TGSI_SEMANTIC_PRIMID
:
1641 res
= bld
->system_values
.prim_id
;
1642 atype
= TGSI_TYPE_UNSIGNED
;
1645 case TGSI_SEMANTIC_INVOCATIONID
:
1646 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.invocation_id
);
1647 atype
= TGSI_TYPE_UNSIGNED
;
1651 assert(!"unexpected semantic in emit_fetch_system_value");
1652 res
= bld_base
->base
.zero
;
1653 atype
= TGSI_TYPE_FLOAT
;
1657 if (atype
!= stype
) {
1658 if (stype
== TGSI_TYPE_FLOAT
) {
1659 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1660 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1661 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1662 } else if (stype
== TGSI_TYPE_SIGNED
) {
1663 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1671 * Register fetch with derivatives.
1675 struct lp_build_tgsi_soa_context
*bld
,
1684 /* TODO: use interpolation coeffs for inputs */
1687 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
1690 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
1694 * store an array of 8 64-bit into two arrays of 8 floats
1696 * value is d0, d1, d2, d3 etc.
1697 * each 64-bit has high and low pieces x, y
1698 * so gets stored into the separate channels as:
1699 * chan_ptr = d0.x, d1.x, d2.x, d3.x
1700 * chan_ptr2 = d0.y, d1.y, d2.y, d3.y
1703 emit_store_64bit_chan(struct lp_build_tgsi_context
*bld_base
,
1704 LLVMValueRef chan_ptr
, LLVMValueRef chan_ptr2
,
1707 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1708 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1709 LLVMBuilderRef builder
= gallivm
->builder
;
1710 struct lp_build_context
*float_bld
= &bld_base
->base
;
1712 LLVMValueRef temp
, temp2
;
1713 LLVMValueRef shuffles
[8];
1714 LLVMValueRef shuffles2
[8];
1716 for (i
= 0; i
< bld_base
->base
.type
.length
; i
++) {
1717 shuffles
[i
] = lp_build_const_int32(gallivm
, i
* 2);
1718 shuffles2
[i
] = lp_build_const_int32(gallivm
, (i
* 2) + 1);
1721 temp
= LLVMBuildShuffleVector(builder
, value
,
1722 LLVMGetUndef(LLVMTypeOf(value
)),
1723 LLVMConstVector(shuffles
,
1724 bld_base
->base
.type
.length
),
1726 temp2
= LLVMBuildShuffleVector(builder
, value
,
1727 LLVMGetUndef(LLVMTypeOf(value
)),
1728 LLVMConstVector(shuffles2
,
1729 bld_base
->base
.type
.length
),
1732 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp
, chan_ptr
);
1733 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp2
, chan_ptr2
);
1741 struct lp_build_tgsi_context
*bld_base
,
1742 const struct tgsi_full_instruction
*inst
,
1744 unsigned chan_index
,
1747 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1748 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1749 LLVMBuilderRef builder
= gallivm
->builder
;
1750 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
1751 struct lp_build_context
*float_bld
= &bld_base
->base
;
1752 struct lp_build_context
*int_bld
= &bld_base
->int_bld
;
1753 LLVMValueRef indirect_index
= NULL
;
1754 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
);
1759 * It is always assumed to be float.
1761 if (inst
->Instruction
.Saturate
) {
1762 assert(dtype
== TGSI_TYPE_FLOAT
||
1763 dtype
== TGSI_TYPE_UNTYPED
);
1764 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1765 value
= lp_build_clamp_zero_one_nanzero(float_bld
, value
);
1768 if (reg
->Register
.Indirect
) {
1770 * Currently the mesa/st doesn't generate indirect stores
1771 * to 64-bit values, it normally uses MOV to do indirect stores.
1773 assert(!tgsi_type_is_64bit(dtype
));
1774 indirect_index
= get_indirect_index(bld
,
1776 reg
->Register
.Index
,
1779 assert(reg
->Register
.Index
<=
1780 bld_base
->info
->file_max
[reg
->Register
.File
]);
1783 if (DEBUG_EXECUTION
) {
1784 emit_dump_reg(gallivm
, reg
->Register
.File
, reg
->Register
.Index
, chan_index
, value
);
1787 switch( reg
->Register
.File
) {
1788 case TGSI_FILE_OUTPUT
:
1789 /* Outputs are always stored as floats */
1790 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1792 if (reg
->Register
.Indirect
) {
1793 LLVMValueRef index_vec
; /* indexes into the output registers */
1794 LLVMValueRef outputs_array
;
1795 LLVMTypeRef fptr_type
;
1797 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1802 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1803 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
, fptr_type
, "");
1805 /* Scatter store values into output registers */
1806 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1810 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1813 if (tgsi_type_is_64bit(dtype
)) {
1814 LLVMValueRef out_ptr2
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1816 emit_store_64bit_chan(bld_base
, out_ptr
, out_ptr2
,
1819 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, out_ptr
);
1823 case TGSI_FILE_TEMPORARY
:
1824 /* Temporaries are always stored as floats */
1825 if (!tgsi_type_is_64bit(dtype
))
1826 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1828 value
= LLVMBuildBitCast(builder
, value
, LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2), "");
1830 if (reg
->Register
.Indirect
) {
1831 LLVMValueRef index_vec
; /* indexes into the temp registers */
1832 LLVMValueRef temps_array
;
1833 LLVMTypeRef fptr_type
;
1835 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1840 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1841 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1843 /* Scatter store values into temp registers */
1844 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1848 LLVMValueRef temp_ptr
;
1849 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, chan_index
);
1851 if (tgsi_type_is_64bit(dtype
)) {
1852 LLVMValueRef temp_ptr2
= lp_get_temp_ptr_soa(bld
,
1853 reg
->Register
.Index
,
1855 emit_store_64bit_chan(bld_base
, temp_ptr
, temp_ptr2
,
1859 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, temp_ptr
);
1863 case TGSI_FILE_ADDRESS
:
1864 assert(dtype
== TGSI_TYPE_SIGNED
);
1865 assert(LLVMTypeOf(value
) == int_bld
->vec_type
);
1866 value
= LLVMBuildBitCast(builder
, value
, int_bld
->vec_type
, "");
1867 lp_exec_mask_store(&bld
->exec_mask
, int_bld
, value
,
1868 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1879 * Called at the beginning of the translation of each TGSI instruction, to
1880 * emit some debug code.
1884 struct lp_build_tgsi_context
* bld_base
,
1885 const struct tgsi_full_instruction
* inst
,
1886 const struct tgsi_opcode_info
* info
)
1889 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1891 if (DEBUG_EXECUTION
) {
1893 * Dump the TGSI instruction.
1896 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1900 tgsi_dump_instruction_str(inst
, bld_base
->pc
, &buf
[2], sizeof buf
- 2);
1901 lp_build_printf(gallivm
, buf
);
1903 /* Dump the execution mask.
1905 if (bld
->exec_mask
.has_mask
) {
1906 lp_build_print_value(gallivm
, " mask = ", bld
->exec_mask
.exec_mask
);
1913 struct lp_build_tgsi_context
* bld_base
,
1914 const struct tgsi_full_instruction
* inst
,
1915 const struct tgsi_opcode_info
* info
,
1916 LLVMValueRef dst
[4])
1919 unsigned chan_index
;
1920 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
);
1923 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
1925 if (tgsi_type_is_64bit(dtype
) && (chan_index
== 1 || chan_index
== 3))
1927 emit_store_chan(bld_base
, inst
, 0, chan_index
, dst
[chan_index
]);
1933 tgsi_to_pipe_tex_target(unsigned tgsi_target
)
1935 switch (tgsi_target
) {
1936 case TGSI_TEXTURE_BUFFER
:
1938 case TGSI_TEXTURE_1D
:
1939 case TGSI_TEXTURE_SHADOW1D
:
1940 return PIPE_TEXTURE_1D
;
1941 case TGSI_TEXTURE_2D
:
1942 case TGSI_TEXTURE_SHADOW2D
:
1943 case TGSI_TEXTURE_2D_MSAA
:
1944 return PIPE_TEXTURE_2D
;
1945 case TGSI_TEXTURE_3D
:
1946 return PIPE_TEXTURE_3D
;
1947 case TGSI_TEXTURE_CUBE
:
1948 case TGSI_TEXTURE_SHADOWCUBE
:
1949 return PIPE_TEXTURE_CUBE
;
1950 case TGSI_TEXTURE_RECT
:
1951 case TGSI_TEXTURE_SHADOWRECT
:
1952 return PIPE_TEXTURE_RECT
;
1953 case TGSI_TEXTURE_1D_ARRAY
:
1954 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1955 return PIPE_TEXTURE_1D_ARRAY
;
1956 case TGSI_TEXTURE_2D_ARRAY
:
1957 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1958 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
1959 return PIPE_TEXTURE_2D_ARRAY
;
1960 case TGSI_TEXTURE_CUBE_ARRAY
:
1961 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1962 return PIPE_TEXTURE_CUBE_ARRAY
;
1970 static enum lp_sampler_lod_property
1971 lp_build_lod_property(
1972 struct lp_build_tgsi_context
*bld_base
,
1973 const struct tgsi_full_instruction
*inst
,
1976 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
1977 enum lp_sampler_lod_property lod_property
;
1980 * Not much we can do here. We could try catching inputs declared
1981 * with constant interpolation but not sure it's worth it - since for
1982 * TEX opcodes as well as FETCH/LD the lod comes from same reg as
1983 * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO), just
1984 * like the constant/immediate recognition below.
1985 * What seems to be of more value would be to recognize temps holding
1986 * broadcasted scalars but no way we can do it.
1987 * Tried asking llvm but without any success (using LLVMIsConstant
1988 * even though this isn't exactly what we'd need), even as simple as
1989 * IMM[0] UINT32 (0,-1,0,0)
1990 * MOV TEMP[0] IMM[0].yyyy
1991 * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
1993 * This means there's ZERO chance this will ever catch a scalar lod
1994 * with traditional tex opcodes as well as texel fetches, since the lod
1995 * comes from the same reg as coords (except some test shaders using
1996 * constant coords maybe).
1997 * There's at least hope for sample opcodes as well as size queries.
1999 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
||
2000 reg
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
2001 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2003 else if (bld_base
->info
->processor
== PIPE_SHADER_FRAGMENT
) {
2004 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
2005 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2008 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2012 /* never use scalar (per-quad) lod the results are just too wrong. */
2013 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2015 return lod_property
;
2020 * High-level instruction translators.
2024 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
2025 const struct tgsi_full_instruction
*inst
,
2026 enum lp_build_tex_modifier modifier
,
2027 LLVMValueRef
*texel
,
2028 unsigned sampler_reg
,
2029 enum lp_sampler_op_type sampler_op
)
2031 unsigned unit
= inst
->Src
[sampler_reg
].Register
.Index
;
2032 LLVMValueRef oow
= NULL
;
2033 LLVMValueRef lod
= NULL
;
2034 LLVMValueRef coords
[5];
2035 LLVMValueRef offsets
[3] = { NULL
};
2036 struct lp_derivatives derivs
;
2037 struct lp_sampler_params params
;
2038 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2039 unsigned num_derivs
, num_offsets
, i
;
2040 unsigned shadow_coord
= 0;
2041 unsigned layer_coord
= 0;
2042 unsigned sample_key
= sampler_op
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2044 memset(¶ms
, 0, sizeof(params
));
2046 if (!bld
->sampler
) {
2047 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2048 for (i
= 0; i
< 4; i
++) {
2049 texel
[i
] = bld
->bld_base
.base
.undef
;
2054 switch (inst
->Texture
.Texture
) {
2055 case TGSI_TEXTURE_1D_ARRAY
:
2058 case TGSI_TEXTURE_1D
:
2062 case TGSI_TEXTURE_2D_ARRAY
:
2065 case TGSI_TEXTURE_2D
:
2066 case TGSI_TEXTURE_RECT
:
2070 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2073 case TGSI_TEXTURE_SHADOW1D
:
2078 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2084 case TGSI_TEXTURE_SHADOW2D
:
2085 case TGSI_TEXTURE_SHADOWRECT
:
2090 case TGSI_TEXTURE_CUBE
:
2094 case TGSI_TEXTURE_3D
:
2098 case TGSI_TEXTURE_SHADOWCUBE
:
2103 case TGSI_TEXTURE_CUBE_ARRAY
:
2108 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2112 shadow_coord
= 4; /* shadow coord special different reg */
2114 case TGSI_TEXTURE_2D_MSAA
:
2115 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2121 /* Note lod and especially projected are illegal in a LOT of cases */
2122 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2123 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2124 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
2125 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
) {
2126 /* note that shadow cube array with bias/explicit lod does not exist */
2127 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2130 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2132 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2133 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2135 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2136 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2138 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2141 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
2142 oow
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2143 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
2146 for (i
= 0; i
< num_derivs
; i
++) {
2147 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2148 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2149 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
2151 for (i
= num_derivs
; i
< 5; i
++) {
2152 coords
[i
] = bld
->bld_base
.base
.undef
;
2155 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2157 if (layer_coord
== 3) {
2158 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2161 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2163 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2164 coords
[2] = lp_build_mul(&bld
->bld_base
.base
, coords
[2], oow
);
2166 /* Shadow coord occupies always 5th slot. */
2168 sample_key
|= LP_SAMPLER_SHADOW
;
2169 if (shadow_coord
== 4) {
2170 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2173 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, shadow_coord
);
2175 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2176 coords
[4] = lp_build_mul(&bld
->bld_base
.base
, coords
[4], oow
);
2179 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2181 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2182 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2183 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, dim
);
2184 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 2, dim
);
2186 params
.derivs
= &derivs
;
2188 * could also check all src regs if constant but I doubt such
2189 * cases exist in practice.
2191 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2192 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
2193 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2196 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2200 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2203 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2205 /* we don't handle the 4 offset version of tg4 */
2206 if (inst
->Texture
.NumOffsets
== 1) {
2208 sample_key
|= LP_SAMPLER_OFFSETS
;
2209 for (dim
= 0; dim
< num_offsets
; dim
++) {
2210 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2214 params
.type
= bld
->bld_base
.base
.type
;
2215 params
.sample_key
= sample_key
;
2216 params
.texture_index
= unit
;
2217 params
.sampler_index
= unit
;
2218 params
.context_ptr
= bld
->context_ptr
;
2219 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2220 params
.coords
= coords
;
2221 params
.offsets
= offsets
;
2223 params
.texel
= texel
;
2225 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2226 bld
->bld_base
.base
.gallivm
,
2231 emit_sample(struct lp_build_tgsi_soa_context
*bld
,
2232 const struct tgsi_full_instruction
*inst
,
2233 enum lp_build_tex_modifier modifier
,
2235 LLVMValueRef
*texel
)
2237 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2238 unsigned texture_unit
, sampler_unit
;
2239 LLVMValueRef lod
= NULL
;
2240 LLVMValueRef coords
[5];
2241 LLVMValueRef offsets
[3] = { NULL
};
2242 struct lp_derivatives derivs
;
2243 struct lp_sampler_params params
;
2244 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2246 unsigned num_offsets
, num_derivs
, i
;
2247 unsigned layer_coord
= 0;
2248 unsigned sample_key
= LP_SAMPLER_OP_TEXTURE
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2250 memset(¶ms
, 0, sizeof(params
));
2252 if (!bld
->sampler
) {
2253 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2254 for (i
= 0; i
< 4; i
++) {
2255 texel
[i
] = bld
->bld_base
.base
.undef
;
2261 * unlike old-style tex opcodes the texture/sampler indices
2262 * always come from src1 and src2 respectively.
2264 texture_unit
= inst
->Src
[1].Register
.Index
;
2265 sampler_unit
= inst
->Src
[2].Register
.Index
;
2268 * Note inst->Texture.Texture will contain the number of offsets,
2269 * however the target information is NOT there and comes from the
2270 * declared sampler views instead.
2272 switch (bld
->sv
[texture_unit
].Resource
) {
2273 case TGSI_TEXTURE_1D
:
2277 case TGSI_TEXTURE_1D_ARRAY
:
2282 case TGSI_TEXTURE_2D
:
2283 case TGSI_TEXTURE_RECT
:
2287 case TGSI_TEXTURE_2D_ARRAY
:
2292 case TGSI_TEXTURE_CUBE
:
2296 case TGSI_TEXTURE_3D
:
2300 case TGSI_TEXTURE_CUBE_ARRAY
:
2310 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2311 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2312 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2313 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2314 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2316 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2317 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2319 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2321 else if (modifier
== LP_BLD_TEX_MODIFIER_LOD_ZERO
) {
2322 /* XXX might be better to explicitly pass the level zero information */
2323 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2324 lod
= lp_build_const_vec(gallivm
, bld
->bld_base
.base
.type
, 0.0F
);
2327 for (i
= 0; i
< num_derivs
; i
++) {
2328 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2330 for (i
= num_derivs
; i
< 5; i
++) {
2331 coords
[i
] = bld
->bld_base
.base
.undef
;
2334 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2336 if (layer_coord
== 3)
2337 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2339 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2341 /* Shadow coord occupies always 5th slot. */
2343 sample_key
|= LP_SAMPLER_SHADOW
;
2344 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2347 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2349 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2350 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2351 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, dim
);
2352 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 4, dim
);
2354 params
.derivs
= &derivs
;
2356 * could also check all src regs if constant but I doubt such
2357 * cases exist in practice.
2359 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2360 if (gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) {
2361 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2364 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2368 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2372 /* some advanced gather instructions (txgo) would require 4 offsets */
2373 if (inst
->Texture
.NumOffsets
== 1) {
2375 sample_key
|= LP_SAMPLER_OFFSETS
;
2376 for (dim
= 0; dim
< num_offsets
; dim
++) {
2377 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2380 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2382 params
.type
= bld
->bld_base
.base
.type
;
2383 params
.sample_key
= sample_key
;
2384 params
.texture_index
= texture_unit
;
2385 params
.sampler_index
= sampler_unit
;
2386 params
.context_ptr
= bld
->context_ptr
;
2387 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2388 params
.coords
= coords
;
2389 params
.offsets
= offsets
;
2391 params
.texel
= texel
;
2393 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2394 bld
->bld_base
.base
.gallivm
,
2397 if (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2398 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2399 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2400 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
) {
2401 unsigned char swizzles
[4];
2402 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2403 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2404 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2405 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2407 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2412 emit_fetch_texels( struct lp_build_tgsi_soa_context
*bld
,
2413 const struct tgsi_full_instruction
*inst
,
2414 LLVMValueRef
*texel
,
2417 unsigned unit
, target
;
2418 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
2419 LLVMValueRef explicit_lod
= NULL
;
2420 LLVMValueRef coords
[5];
2421 LLVMValueRef offsets
[3] = { NULL
};
2422 struct lp_sampler_params params
;
2423 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2425 unsigned layer_coord
= 0;
2426 unsigned sample_key
= LP_SAMPLER_OP_FETCH
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2428 memset(¶ms
, 0, sizeof(params
));
2430 if (!bld
->sampler
) {
2431 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2432 for (i
= 0; i
< 4; i
++) {
2433 texel
[i
] = coord_undef
;
2438 unit
= inst
->Src
[1].Register
.Index
;
2441 target
= bld
->sv
[unit
].Resource
;
2444 target
= inst
->Texture
.Texture
;
2448 case TGSI_TEXTURE_1D
:
2449 case TGSI_TEXTURE_BUFFER
:
2452 case TGSI_TEXTURE_1D_ARRAY
:
2456 case TGSI_TEXTURE_2D
:
2457 case TGSI_TEXTURE_RECT
:
2458 case TGSI_TEXTURE_2D_MSAA
:
2461 case TGSI_TEXTURE_2D_ARRAY
:
2462 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2466 case TGSI_TEXTURE_3D
:
2474 /* always have lod except for buffers and msaa targets ? */
2475 if (target
!= TGSI_TEXTURE_BUFFER
&&
2476 target
!= TGSI_TEXTURE_2D_MSAA
&&
2477 target
!= TGSI_TEXTURE_2D_ARRAY_MSAA
) {
2478 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2479 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2480 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2483 * XXX: for real msaa support, the w component (or src2.x for sample_i_ms)
2484 * would be the sample index.
2487 for (i
= 0; i
< dims
; i
++) {
2488 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2490 /* never use more than 3 coords here but emit_fetch_texel copies all 5 anyway */
2491 for (i
= dims
; i
< 5; i
++) {
2492 coords
[i
] = coord_undef
;
2495 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2497 if (inst
->Texture
.NumOffsets
== 1) {
2499 sample_key
|= LP_SAMPLER_OFFSETS
;
2500 for (dim
= 0; dim
< dims
; dim
++) {
2501 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2504 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2506 params
.type
= bld
->bld_base
.base
.type
;
2507 params
.sample_key
= sample_key
;
2508 params
.texture_index
= unit
;
2510 * sampler not actually used, set to 0 so it won't exceed PIPE_MAX_SAMPLERS
2511 * and trigger some assertions with d3d10 where the sampler view number
2514 params
.sampler_index
= 0;
2515 params
.context_ptr
= bld
->context_ptr
;
2516 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2517 params
.coords
= coords
;
2518 params
.offsets
= offsets
;
2519 params
.derivs
= NULL
;
2520 params
.lod
= explicit_lod
;
2521 params
.texel
= texel
;
2523 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2524 bld
->bld_base
.base
.gallivm
,
2528 (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2529 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2530 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2531 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
)) {
2532 unsigned char swizzles
[4];
2533 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2534 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2535 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2536 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2538 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2543 emit_size_query( struct lp_build_tgsi_soa_context
*bld
,
2544 const struct tgsi_full_instruction
*inst
,
2545 LLVMValueRef
*sizes_out
,
2546 boolean is_sviewinfo
)
2548 LLVMValueRef explicit_lod
;
2549 enum lp_sampler_lod_property lod_property
;
2552 unsigned unit
= inst
->Src
[1].Register
.Index
;
2553 unsigned target
, pipe_target
;
2554 struct lp_sampler_size_query_params params
;
2557 target
= bld
->sv
[unit
].Resource
;
2560 target
= inst
->Texture
.Texture
;
2563 case TGSI_TEXTURE_BUFFER
:
2564 case TGSI_TEXTURE_RECT
:
2565 case TGSI_TEXTURE_SHADOWRECT
:
2573 if (!bld
->sampler
) {
2574 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
2575 for (i
= 0; i
< 4; i
++)
2576 sizes_out
[i
] = bld
->bld_base
.int_bld
.undef
;
2581 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 0);
2582 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2585 explicit_lod
= NULL
;
2586 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2590 pipe_target
= tgsi_to_pipe_tex_target(target
);
2592 params
.int_type
= bld
->bld_base
.int_bld
.type
;
2593 params
.texture_unit
= unit
;
2594 params
.target
= pipe_target
;
2595 params
.context_ptr
= bld
->context_ptr
;
2596 params
.is_sviewinfo
= TRUE
;
2597 params
.lod_property
= lod_property
;
2598 params
.explicit_lod
= explicit_lod
;
2599 params
.sizes_out
= sizes_out
;
2601 bld
->sampler
->emit_size_query(bld
->sampler
,
2602 bld
->bld_base
.base
.gallivm
,
2607 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
2612 for (i
= 0; i
< 5; i
++) {
2615 if (pc
+ i
>= bld
->bld_base
.info
->num_instructions
)
2618 opcode
= bld
->bld_base
.instructions
[pc
+ i
].Instruction
.Opcode
;
2620 if (opcode
== TGSI_OPCODE_END
)
2623 if (opcode
== TGSI_OPCODE_TEX
||
2624 opcode
== TGSI_OPCODE_TXP
||
2625 opcode
== TGSI_OPCODE_TXD
||
2626 opcode
== TGSI_OPCODE_TXB
||
2627 opcode
== TGSI_OPCODE_TXL
||
2628 opcode
== TGSI_OPCODE_TXF
||
2629 opcode
== TGSI_OPCODE_TXQ
||
2630 opcode
== TGSI_OPCODE_TEX2
||
2631 opcode
== TGSI_OPCODE_TXB2
||
2632 opcode
== TGSI_OPCODE_TXL2
||
2633 opcode
== TGSI_OPCODE_SAMPLE
||
2634 opcode
== TGSI_OPCODE_SAMPLE_B
||
2635 opcode
== TGSI_OPCODE_SAMPLE_C
||
2636 opcode
== TGSI_OPCODE_SAMPLE_C_LZ
||
2637 opcode
== TGSI_OPCODE_SAMPLE_D
||
2638 opcode
== TGSI_OPCODE_SAMPLE_I
||
2639 opcode
== TGSI_OPCODE_SAMPLE_I_MS
||
2640 opcode
== TGSI_OPCODE_SAMPLE_L
||
2641 opcode
== TGSI_OPCODE_SVIEWINFO
||
2642 opcode
== TGSI_OPCODE_CAL
||
2643 opcode
== TGSI_OPCODE_IF
||
2644 opcode
== TGSI_OPCODE_UIF
||
2645 opcode
== TGSI_OPCODE_BGNLOOP
||
2646 opcode
== TGSI_OPCODE_SWITCH
)
2656 * Kill fragment if any of the src register values are negative.
2660 struct lp_build_tgsi_soa_context
*bld
,
2661 const struct tgsi_full_instruction
*inst
,
2664 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2665 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
2666 LLVMValueRef terms
[TGSI_NUM_CHANNELS
];
2668 unsigned chan_index
;
2670 memset(&terms
, 0, sizeof terms
);
2672 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2675 /* Unswizzle channel */
2676 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
2678 /* Check if the component has not been already tested. */
2679 assert(swizzle
< TGSI_NUM_CHANNELS
);
2680 if( !terms
[swizzle
] )
2681 /* TODO: change the comparison operator instead of setting the sign */
2682 terms
[swizzle
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, chan_index
);
2686 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2687 if(terms
[chan_index
]) {
2688 LLVMValueRef chan_mask
;
2691 * If term < 0 then mask = 0 else mask = ~0.
2693 chan_mask
= lp_build_cmp(&bld
->bld_base
.base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->bld_base
.base
.zero
);
2696 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
2702 if (bld
->exec_mask
.has_mask
) {
2703 LLVMValueRef invmask
;
2704 invmask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2705 mask
= LLVMBuildOr(builder
, mask
, invmask
, "");
2708 lp_build_mask_update(bld
->mask
, mask
);
2709 if (!near_end_of_shader(bld
, pc
))
2710 lp_build_mask_check(bld
->mask
);
2715 * Unconditional fragment kill.
2716 * The only predication is the execution mask which will apply if
2717 * we're inside a loop or conditional.
2720 emit_kill(struct lp_build_tgsi_soa_context
*bld
,
2723 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2726 /* For those channels which are "alive", disable fragment shader
2729 if (bld
->exec_mask
.has_mask
) {
2730 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2733 LLVMValueRef zero
= LLVMConstNull(bld
->bld_base
.base
.int_vec_type
);
2737 lp_build_mask_update(bld
->mask
, mask
);
2739 if (!near_end_of_shader(bld
, pc
))
2740 lp_build_mask_check(bld
->mask
);
2745 * Emit code which will dump the value of all the temporary registers
2749 emit_dump_file(struct lp_build_tgsi_soa_context
*bld
,
2752 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
2753 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2754 LLVMBuilderRef builder
= gallivm
->builder
;
2755 LLVMValueRef reg_ptr
;
2757 int max_index
= info
->file_max
[file
];
2760 * Some register files, particularly constants, can be very large,
2761 * and dumping everything could make this unusably slow.
2763 max_index
= MIN2(max_index
, 32);
2765 for (index
= 0; index
<= max_index
; index
++) {
2770 if (index
< 8 * sizeof(unsigned) &&
2771 (info
->file_mask
[file
] & (1u << index
)) == 0) {
2772 /* This was not declared.*/
2776 if (file
== TGSI_FILE_INPUT
) {
2777 mask
= info
->input_usage_mask
[index
];
2779 mask
= TGSI_WRITEMASK_XYZW
;
2782 for (chan
= 0; chan
< 4; chan
++) {
2783 if ((mask
& (1 << chan
)) == 0) {
2784 /* This channel is not used.*/
2788 if (file
== TGSI_FILE_CONSTANT
) {
2789 struct tgsi_full_src_register reg
;
2790 memset(®
, 0, sizeof reg
);
2791 reg
.Register
.File
= file
;
2792 reg
.Register
.Index
= index
;
2793 reg
.Register
.SwizzleX
= 0;
2794 reg
.Register
.SwizzleY
= 1;
2795 reg
.Register
.SwizzleZ
= 2;
2796 reg
.Register
.SwizzleW
= 3;
2798 res
= bld
->bld_base
.emit_fetch_funcs
[file
](&bld
->bld_base
, ®
, TGSI_TYPE_FLOAT
, chan
);
2802 } else if (file
== TGSI_FILE_INPUT
) {
2803 res
= bld
->inputs
[index
][chan
];
2807 } else if (file
== TGSI_FILE_TEMPORARY
) {
2808 reg_ptr
= lp_get_temp_ptr_soa(bld
, index
, chan
);
2810 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2811 } else if (file
== TGSI_FILE_OUTPUT
) {
2812 reg_ptr
= lp_get_output_ptr(bld
, index
, chan
);
2814 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2820 emit_dump_reg(gallivm
, file
, index
, chan
, res
);
2828 lp_emit_declaration_soa(
2829 struct lp_build_tgsi_context
*bld_base
,
2830 const struct tgsi_full_declaration
*decl
)
2832 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2833 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2834 LLVMTypeRef vec_type
= bld
->bld_base
.base
.vec_type
;
2835 const unsigned first
= decl
->Range
.First
;
2836 const unsigned last
= decl
->Range
.Last
;
2839 assert(last
<= bld
->bld_base
.info
->file_max
[decl
->Declaration
.File
]);
2841 switch (decl
->Declaration
.File
) {
2842 case TGSI_FILE_TEMPORARY
:
2843 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
2844 assert(last
< LP_MAX_INLINED_TEMPS
);
2845 for (idx
= first
; idx
<= last
; ++idx
) {
2846 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2847 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
2852 case TGSI_FILE_OUTPUT
:
2853 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
2854 for (idx
= first
; idx
<= last
; ++idx
) {
2855 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2856 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
2857 vec_type
, "output");
2862 case TGSI_FILE_ADDRESS
:
2863 /* ADDR registers are only allocated with an integer LLVM IR type,
2864 * as they are guaranteed to always have integers.
2865 * XXX: Not sure if this exception is worthwhile (or the whole idea of
2866 * an ADDR register for that matter).
2868 assert(last
< LP_MAX_TGSI_ADDRS
);
2869 for (idx
= first
; idx
<= last
; ++idx
) {
2870 assert(idx
< LP_MAX_TGSI_ADDRS
);
2871 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2872 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, bld_base
->base
.int_vec_type
, "addr");
2876 case TGSI_FILE_SAMPLER_VIEW
:
2878 * The target stored here MUST match whatever there actually
2879 * is in the set sampler views (what about return type?).
2881 assert(last
< PIPE_MAX_SHADER_SAMPLER_VIEWS
);
2882 for (idx
= first
; idx
<= last
; ++idx
) {
2883 bld
->sv
[idx
] = decl
->SamplerView
;
2887 case TGSI_FILE_CONSTANT
:
2890 * We could trivially fetch the per-buffer pointer when fetching the
2891 * constant, relying on llvm to figure out it's always the same pointer
2892 * anyway. However, doing so results in a huge (more than factor of 10)
2893 * slowdown in llvm compilation times for some (but not all) shaders
2894 * (more specifically, the IR optimization spends way more time in
2895 * DominatorTree::dominates). At least with llvm versions 3.1, 3.3.
2897 unsigned idx2D
= decl
->Dim
.Index2D
;
2898 LLVMValueRef index2D
= lp_build_const_int32(gallivm
, idx2D
);
2899 assert(idx2D
< LP_MAX_TGSI_CONST_BUFFERS
);
2900 bld
->consts
[idx2D
] =
2901 lp_build_array_get(gallivm
, bld
->consts_ptr
, index2D
);
2902 bld
->consts_sizes
[idx2D
] =
2903 lp_build_array_get(gallivm
, bld
->const_sizes_ptr
, index2D
);
2908 /* don't need to declare other vars */
2914 void lp_emit_immediate_soa(
2915 struct lp_build_tgsi_context
*bld_base
,
2916 const struct tgsi_full_immediate
*imm
)
2918 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2919 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
2920 LLVMValueRef imms
[4];
2922 const uint size
= imm
->Immediate
.NrTokens
- 1;
2924 switch (imm
->Immediate
.DataType
) {
2925 case TGSI_IMM_FLOAT32
:
2926 for( i
= 0; i
< size
; ++i
)
2928 lp_build_const_vec(gallivm
, bld_base
->base
.type
, imm
->u
[i
].Float
);
2931 case TGSI_IMM_FLOAT64
:
2932 case TGSI_IMM_UINT64
:
2933 case TGSI_IMM_INT64
:
2934 case TGSI_IMM_UINT32
:
2935 for( i
= 0; i
< size
; ++i
) {
2936 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->uint_bld
.type
, imm
->u
[i
].Uint
);
2937 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
2941 case TGSI_IMM_INT32
:
2942 for( i
= 0; i
< size
; ++i
) {
2943 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->int_bld
.type
, imm
->u
[i
].Int
);
2944 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
2949 for( i
= size
; i
< 4; ++i
)
2950 imms
[i
] = bld_base
->base
.undef
;
2952 if (bld
->use_immediates_array
) {
2953 unsigned index
= bld
->num_immediates
;
2954 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2955 LLVMBuilderRef builder
= gallivm
->builder
;
2957 assert(bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
));
2958 for (i
= 0; i
< 4; ++i
) {
2959 LLVMValueRef lindex
= lp_build_const_int32(
2960 bld
->bld_base
.base
.gallivm
, index
* 4 + i
);
2961 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
2962 bld
->imms_array
, &lindex
, 1, "");
2963 LLVMBuildStore(builder
, imms
[i
], imm_ptr
);
2966 /* simply copy the immediate values into the next immediates[] slot */
2968 assert(imm
->Immediate
.NrTokens
- 1 <= 4);
2969 assert(bld
->num_immediates
< LP_MAX_INLINED_IMMEDIATES
);
2971 for(i
= 0; i
< 4; ++i
)
2972 bld
->immediates
[bld
->num_immediates
][i
] = imms
[i
];
2974 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
2975 unsigned index
= bld
->num_immediates
;
2976 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2977 LLVMBuilderRef builder
= gallivm
->builder
;
2978 for (i
= 0; i
< 4; ++i
) {
2979 LLVMValueRef lindex
= lp_build_const_int32(
2980 bld
->bld_base
.base
.gallivm
, index
* 4 + i
);
2981 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
2982 bld
->imms_array
, &lindex
, 1, "");
2983 LLVMBuildStore(builder
,
2984 bld
->immediates
[index
][i
],
2990 bld
->num_immediates
++;
2995 const struct lp_build_tgsi_action
* action
,
2996 struct lp_build_tgsi_context
* bld_base
,
2997 struct lp_build_emit_data
* emit_data
)
2999 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3001 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
,
3002 &emit_data
->output
[emit_data
->chan
], NULL
);
3007 const struct lp_build_tgsi_action
* action
,
3008 struct lp_build_tgsi_context
* bld_base
,
3009 struct lp_build_emit_data
* emit_data
)
3011 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3013 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
, NULL
,
3014 &emit_data
->output
[emit_data
->chan
]);
3019 const struct lp_build_tgsi_action
* action
,
3020 struct lp_build_tgsi_context
* bld_base
,
3021 struct lp_build_emit_data
* emit_data
)
3023 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3025 emit_kill(bld
, bld_base
->pc
- 1);
3030 const struct lp_build_tgsi_action
* action
,
3031 struct lp_build_tgsi_context
* bld_base
,
3032 struct lp_build_emit_data
* emit_data
)
3034 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3036 emit_kill_if(bld
, emit_data
->inst
, bld_base
->pc
- 1);
3041 const struct lp_build_tgsi_action
* action
,
3042 struct lp_build_tgsi_context
* bld_base
,
3043 struct lp_build_emit_data
* emit_data
)
3045 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3047 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3048 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3053 const struct lp_build_tgsi_action
* action
,
3054 struct lp_build_tgsi_context
* bld_base
,
3055 struct lp_build_emit_data
* emit_data
)
3057 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3059 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3060 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3065 const struct lp_build_tgsi_action
* action
,
3066 struct lp_build_tgsi_context
* bld_base
,
3067 struct lp_build_emit_data
* emit_data
)
3069 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3071 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3072 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3077 const struct lp_build_tgsi_action
* action
,
3078 struct lp_build_tgsi_context
* bld_base
,
3079 struct lp_build_emit_data
* emit_data
)
3081 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3083 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3084 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3089 const struct lp_build_tgsi_action
* action
,
3090 struct lp_build_tgsi_context
* bld_base
,
3091 struct lp_build_emit_data
* emit_data
)
3093 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3095 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3096 emit_data
->output
, 3, LP_SAMPLER_OP_TEXTURE
);
3101 const struct lp_build_tgsi_action
* action
,
3102 struct lp_build_tgsi_context
* bld_base
,
3103 struct lp_build_emit_data
* emit_data
)
3105 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3107 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3108 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3113 const struct lp_build_tgsi_action
* action
,
3114 struct lp_build_tgsi_context
* bld_base
,
3115 struct lp_build_emit_data
* emit_data
)
3117 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3119 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3120 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3125 const struct lp_build_tgsi_action
* action
,
3126 struct lp_build_tgsi_context
* bld_base
,
3127 struct lp_build_emit_data
* emit_data
)
3129 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3131 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_PROJECTED
,
3132 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3137 const struct lp_build_tgsi_action
* action
,
3138 struct lp_build_tgsi_context
* bld_base
,
3139 struct lp_build_emit_data
* emit_data
)
3141 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3143 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3144 emit_data
->output
, 2, LP_SAMPLER_OP_GATHER
);
3149 const struct lp_build_tgsi_action
* action
,
3150 struct lp_build_tgsi_context
* bld_base
,
3151 struct lp_build_emit_data
* emit_data
)
3153 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3155 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3160 const struct lp_build_tgsi_action
* action
,
3161 struct lp_build_tgsi_context
* bld_base
,
3162 struct lp_build_emit_data
* emit_data
)
3164 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3166 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3171 const struct lp_build_tgsi_action
* action
,
3172 struct lp_build_tgsi_context
* bld_base
,
3173 struct lp_build_emit_data
* emit_data
)
3175 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3177 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3182 const struct lp_build_tgsi_action
* action
,
3183 struct lp_build_tgsi_context
* bld_base
,
3184 struct lp_build_emit_data
* emit_data
)
3186 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3188 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3189 FALSE
, emit_data
->output
);
3194 const struct lp_build_tgsi_action
* action
,
3195 struct lp_build_tgsi_context
* bld_base
,
3196 struct lp_build_emit_data
* emit_data
)
3198 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3200 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3201 FALSE
, emit_data
->output
);
3206 const struct lp_build_tgsi_action
* action
,
3207 struct lp_build_tgsi_context
* bld_base
,
3208 struct lp_build_emit_data
* emit_data
)
3210 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3212 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3213 TRUE
, emit_data
->output
);
3218 const struct lp_build_tgsi_action
* action
,
3219 struct lp_build_tgsi_context
* bld_base
,
3220 struct lp_build_emit_data
* emit_data
)
3222 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3224 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_ZERO
,
3225 TRUE
, emit_data
->output
);
3230 const struct lp_build_tgsi_action
* action
,
3231 struct lp_build_tgsi_context
* bld_base
,
3232 struct lp_build_emit_data
* emit_data
)
3234 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3236 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3237 FALSE
, emit_data
->output
);
3242 const struct lp_build_tgsi_action
* action
,
3243 struct lp_build_tgsi_context
* bld_base
,
3244 struct lp_build_emit_data
* emit_data
)
3246 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3248 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3249 FALSE
, emit_data
->output
);
3254 const struct lp_build_tgsi_action
* action
,
3255 struct lp_build_tgsi_context
* bld_base
,
3256 struct lp_build_emit_data
* emit_data
)
3258 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3260 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3264 mask_vec(struct lp_build_tgsi_context
*bld_base
)
3266 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3267 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3268 struct lp_exec_mask
*exec_mask
= &bld
->exec_mask
;
3270 if (!exec_mask
->has_mask
) {
3271 return lp_build_mask_value(bld
->mask
);
3273 return LLVMBuildAnd(builder
, lp_build_mask_value(bld
->mask
),
3274 exec_mask
->exec_mask
, "");
3278 increment_vec_ptr_by_mask(struct lp_build_tgsi_context
* bld_base
,
3282 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3283 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3285 current_vec
= LLVMBuildSub(builder
, current_vec
, mask
, "");
3287 LLVMBuildStore(builder
, current_vec
, ptr
);
3291 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context
* bld_base
,
3295 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3296 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3298 current_vec
= lp_build_select(&bld_base
->uint_bld
,
3300 bld_base
->uint_bld
.zero
,
3303 LLVMBuildStore(builder
, current_vec
, ptr
);
3307 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context
* bld
,
3308 LLVMValueRef current_mask_vec
,
3309 LLVMValueRef total_emitted_vertices_vec
)
3311 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3312 struct lp_build_context
*int_bld
= &bld
->bld_base
.int_bld
;
3313 LLVMValueRef max_mask
= lp_build_cmp(int_bld
, PIPE_FUNC_LESS
,
3314 total_emitted_vertices_vec
,
3315 bld
->max_output_vertices_vec
);
3317 return LLVMBuildAnd(builder
, current_mask_vec
, max_mask
, "");
3322 const struct lp_build_tgsi_action
* action
,
3323 struct lp_build_tgsi_context
* bld_base
,
3324 struct lp_build_emit_data
* emit_data
)
3326 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3327 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3329 if (bld
->gs_iface
->emit_vertex
) {
3330 LLVMValueRef mask
= mask_vec(bld_base
);
3331 LLVMValueRef total_emitted_vertices_vec
=
3332 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3333 mask
= clamp_mask_to_max_output_vertices(bld
, mask
,
3334 total_emitted_vertices_vec
);
3335 gather_outputs(bld
);
3336 bld
->gs_iface
->emit_vertex(bld
->gs_iface
, &bld
->bld_base
,
3338 total_emitted_vertices_vec
);
3339 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3341 increment_vec_ptr_by_mask(bld_base
, bld
->total_emitted_vertices_vec_ptr
,
3344 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3345 " +++ emit vertex masked ones = ",
3347 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3348 " +++ emit vertex emitted = ",
3349 total_emitted_vertices_vec
);
3356 end_primitive_masked(struct lp_build_tgsi_context
* bld_base
,
3359 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3360 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3362 if (bld
->gs_iface
->end_primitive
) {
3363 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3364 LLVMValueRef emitted_vertices_vec
=
3365 LLVMBuildLoad(builder
, bld
->emitted_vertices_vec_ptr
, "");
3366 LLVMValueRef emitted_prims_vec
=
3367 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3369 LLVMValueRef emitted_mask
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3370 emitted_vertices_vec
,
3372 /* We need to combine the current execution mask with the mask
3373 telling us which, if any, execution slots actually have
3374 unemitted primitives, this way we make sure that end_primitives
3375 executes only on the paths that have unflushed vertices */
3376 mask
= LLVMBuildAnd(builder
, mask
, emitted_mask
, "");
3378 bld
->gs_iface
->end_primitive(bld
->gs_iface
, &bld
->bld_base
,
3379 emitted_vertices_vec
,
3383 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3384 " +++ end prim masked ones = ",
3386 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3387 " +++ end prim emitted verts1 = ",
3388 emitted_vertices_vec
);
3389 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3390 " +++ end prim emitted prims1 = ",
3391 LLVMBuildLoad(builder
,
3392 bld
->emitted_prims_vec_ptr
, ""));
3394 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_prims_vec_ptr
,
3396 clear_uint_vec_ptr_from_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3399 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3400 " +++ end prim emitted verts2 = ",
3401 LLVMBuildLoad(builder
,
3402 bld
->emitted_vertices_vec_ptr
, ""));
3410 const struct lp_build_tgsi_action
* action
,
3411 struct lp_build_tgsi_context
* bld_base
,
3412 struct lp_build_emit_data
* emit_data
)
3414 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3416 if (bld
->gs_iface
->end_primitive
) {
3417 LLVMValueRef mask
= mask_vec(bld_base
);
3418 end_primitive_masked(bld_base
, mask
);
3424 const struct lp_build_tgsi_action
* action
,
3425 struct lp_build_tgsi_context
* bld_base
,
3426 struct lp_build_emit_data
* emit_data
)
3428 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3430 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
3436 const struct lp_build_tgsi_action
* action
,
3437 struct lp_build_tgsi_context
* bld_base
,
3438 struct lp_build_emit_data
* emit_data
)
3440 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3442 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
3447 const struct lp_build_tgsi_action
* action
,
3448 struct lp_build_tgsi_context
* bld_base
,
3449 struct lp_build_emit_data
* emit_data
)
3451 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3453 lp_exec_break(&bld
->exec_mask
, bld_base
);
3458 const struct lp_build_tgsi_action
* action
,
3459 struct lp_build_tgsi_context
* bld_base
,
3460 struct lp_build_emit_data
* emit_data
)
3463 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3465 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
3466 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
3467 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3472 const struct lp_build_tgsi_action
* action
,
3473 struct lp_build_tgsi_context
* bld_base
,
3474 struct lp_build_emit_data
* emit_data
)
3477 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3478 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3480 tmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3481 emit_data
->args
[0], uint_bld
->zero
);
3482 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3487 const struct lp_build_tgsi_action
* action
,
3488 struct lp_build_tgsi_context
* bld_base
,
3489 struct lp_build_emit_data
* emit_data
)
3491 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3493 lp_exec_case(&bld
->exec_mask
, emit_data
->args
[0]);
3498 const struct lp_build_tgsi_action
* action
,
3499 struct lp_build_tgsi_context
* bld_base
,
3500 struct lp_build_emit_data
* emit_data
)
3502 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3504 lp_exec_default(&bld
->exec_mask
, bld_base
);
3509 const struct lp_build_tgsi_action
* action
,
3510 struct lp_build_tgsi_context
* bld_base
,
3511 struct lp_build_emit_data
* emit_data
)
3513 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3515 lp_exec_switch(&bld
->exec_mask
, emit_data
->args
[0]);
3520 const struct lp_build_tgsi_action
* action
,
3521 struct lp_build_tgsi_context
* bld_base
,
3522 struct lp_build_emit_data
* emit_data
)
3524 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3526 lp_exec_endswitch(&bld
->exec_mask
, bld_base
);
3531 const struct lp_build_tgsi_action
* action
,
3532 struct lp_build_tgsi_context
* bld_base
,
3533 struct lp_build_emit_data
* emit_data
)
3535 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3537 lp_exec_bgnloop(&bld
->exec_mask
);
3542 const struct lp_build_tgsi_action
* action
,
3543 struct lp_build_tgsi_context
* bld_base
,
3544 struct lp_build_emit_data
* emit_data
)
3546 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3548 lp_exec_mask_bgnsub(&bld
->exec_mask
);
3553 const struct lp_build_tgsi_action
* action
,
3554 struct lp_build_tgsi_context
* bld_base
,
3555 struct lp_build_emit_data
* emit_data
)
3557 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3559 lp_exec_mask_cond_invert(&bld
->exec_mask
);
3564 const struct lp_build_tgsi_action
* action
,
3565 struct lp_build_tgsi_context
* bld_base
,
3566 struct lp_build_emit_data
* emit_data
)
3568 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3570 lp_exec_mask_cond_pop(&bld
->exec_mask
);
3575 const struct lp_build_tgsi_action
* action
,
3576 struct lp_build_tgsi_context
* bld_base
,
3577 struct lp_build_emit_data
* emit_data
)
3579 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3581 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
3586 const struct lp_build_tgsi_action
* action
,
3587 struct lp_build_tgsi_context
* bld_base
,
3588 struct lp_build_emit_data
* emit_data
)
3590 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3592 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
3597 const struct lp_build_tgsi_action
* action
,
3598 struct lp_build_tgsi_context
* bld_base
,
3599 struct lp_build_emit_data
* emit_data
)
3601 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3603 lp_exec_continue(&bld
->exec_mask
);
3606 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
3608 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3609 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3611 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
3612 LLVMValueRef array_size
=
3613 lp_build_const_int32(gallivm
,
3614 bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4);
3615 bld
->temps_array
= lp_build_array_alloca(gallivm
,
3616 bld_base
->base
.vec_type
, array_size
,
3620 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
3621 LLVMValueRef array_size
=
3622 lp_build_const_int32(gallivm
,
3623 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
3624 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
3625 bld_base
->base
.vec_type
, array_size
,
3629 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3630 LLVMValueRef array_size
=
3631 lp_build_const_int32(gallivm
,
3632 bld_base
->info
->file_max
[TGSI_FILE_IMMEDIATE
] * 4 + 4);
3633 bld
->imms_array
= lp_build_array_alloca(gallivm
,
3634 bld_base
->base
.vec_type
, array_size
,
3638 /* If we have indirect addressing in inputs we need to copy them into
3639 * our alloca array to be able to iterate over them */
3640 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
) && !bld
->gs_iface
) {
3641 unsigned index
, chan
;
3642 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
3643 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
3644 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
3645 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
3646 vec_type
, array_size
,
3649 assert(bld_base
->info
->num_inputs
3650 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
3652 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
3653 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
3654 LLVMValueRef lindex
=
3655 lp_build_const_int32(gallivm
, index
* 4 + chan
);
3656 LLVMValueRef input_ptr
=
3657 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
3659 LLVMValueRef value
= bld
->inputs
[index
][chan
];
3661 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
3666 if (bld
->gs_iface
) {
3667 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
3668 bld
->emitted_prims_vec_ptr
=
3669 lp_build_alloca(gallivm
,
3671 "emitted_prims_ptr");
3672 bld
->emitted_vertices_vec_ptr
=
3673 lp_build_alloca(gallivm
,
3675 "emitted_vertices_ptr");
3676 bld
->total_emitted_vertices_vec_ptr
=
3677 lp_build_alloca(gallivm
,
3679 "total_emitted_vertices_ptr");
3681 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3682 bld
->emitted_prims_vec_ptr
);
3683 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3684 bld
->emitted_vertices_vec_ptr
);
3685 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3686 bld
->total_emitted_vertices_vec_ptr
);
3689 if (DEBUG_EXECUTION
) {
3690 lp_build_printf(gallivm
, "\n");
3691 emit_dump_file(bld
, TGSI_FILE_CONSTANT
);
3693 emit_dump_file(bld
, TGSI_FILE_INPUT
);
3697 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
3699 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3700 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3702 if (DEBUG_EXECUTION
) {
3705 emit_dump_file(bld
, TGSI_FILE_TEMPORARY
);
3707 emit_dump_file(bld
, TGSI_FILE_OUTPUT
);
3708 lp_build_printf(bld_base
->base
.gallivm
, "\n");
3711 /* If we have indirect addressing in outputs we need to copy our alloca array
3712 * to the outputs slots specified by the caller */
3713 if (bld
->gs_iface
) {
3714 LLVMValueRef total_emitted_vertices_vec
;
3715 LLVMValueRef emitted_prims_vec
;
3716 /* implicit end_primitives, needed in case there are any unflushed
3717 vertices in the cache. Note must not call end_primitive here
3718 since the exec_mask is not valid at this point. */
3719 end_primitive_masked(bld_base
, lp_build_mask_value(bld
->mask
));
3721 total_emitted_vertices_vec
=
3722 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3724 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3726 bld
->gs_iface
->gs_epilogue(bld
->gs_iface
,
3728 total_emitted_vertices_vec
,
3731 gather_outputs(bld
);
3736 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
3737 const struct tgsi_token
*tokens
,
3738 struct lp_type type
,
3739 struct lp_build_mask_context
*mask
,
3740 LLVMValueRef consts_ptr
,
3741 LLVMValueRef const_sizes_ptr
,
3742 const struct lp_bld_tgsi_system_values
*system_values
,
3743 const LLVMValueRef (*inputs
)[TGSI_NUM_CHANNELS
],
3744 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
],
3745 LLVMValueRef context_ptr
,
3746 LLVMValueRef thread_data_ptr
,
3747 struct lp_build_sampler_soa
*sampler
,
3748 const struct tgsi_shader_info
*info
,
3749 const struct lp_build_tgsi_gs_iface
*gs_iface
)
3751 struct lp_build_tgsi_soa_context bld
;
3753 struct lp_type res_type
;
3755 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
3756 memset(&res_type
, 0, sizeof res_type
);
3757 res_type
.width
= type
.width
;
3758 res_type
.length
= type
.length
;
3761 /* Setup build context */
3762 memset(&bld
, 0, sizeof bld
);
3763 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
3764 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
3765 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
3766 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
3768 struct lp_type dbl_type
;
3770 dbl_type
.width
*= 2;
3771 lp_build_context_init(&bld
.bld_base
.dbl_bld
, gallivm
, dbl_type
);
3774 struct lp_type uint64_type
;
3775 uint64_type
= lp_uint_type(type
);
3776 uint64_type
.width
*= 2;
3777 lp_build_context_init(&bld
.bld_base
.uint64_bld
, gallivm
, uint64_type
);
3780 struct lp_type int64_type
;
3781 int64_type
= lp_int_type(type
);
3782 int64_type
.width
*= 2;
3783 lp_build_context_init(&bld
.bld_base
.int64_bld
, gallivm
, int64_type
);
3786 bld
.inputs
= inputs
;
3787 bld
.outputs
= outputs
;
3788 bld
.consts_ptr
= consts_ptr
;
3789 bld
.const_sizes_ptr
= const_sizes_ptr
;
3790 bld
.sampler
= sampler
;
3791 bld
.bld_base
.info
= info
;
3792 bld
.indirect_files
= info
->indirect_files
;
3793 bld
.context_ptr
= context_ptr
;
3794 bld
.thread_data_ptr
= thread_data_ptr
;
3797 * If the number of temporaries is rather large then we just
3798 * allocate them as an array right from the start and treat
3799 * like indirect temporaries.
3801 if (info
->file_max
[TGSI_FILE_TEMPORARY
] >= LP_MAX_INLINED_TEMPS
) {
3802 bld
.indirect_files
|= (1 << TGSI_FILE_TEMPORARY
);
3805 * For performance reason immediates are always backed in a static
3806 * array, but if their number is too great, we have to use just
3807 * a dynamically allocated array.
3809 bld
.use_immediates_array
=
3810 (info
->file_max
[TGSI_FILE_IMMEDIATE
] >= LP_MAX_INLINED_IMMEDIATES
);
3811 if (bld
.use_immediates_array
) {
3812 bld
.indirect_files
|= (1 << TGSI_FILE_IMMEDIATE
);
3816 bld
.bld_base
.soa
= TRUE
;
3817 bld
.bld_base
.emit_debug
= emit_debug
;
3818 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
3819 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
3820 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
3821 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
3822 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
3823 bld
.bld_base
.emit_store
= emit_store
;
3825 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
3826 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
3828 bld
.bld_base
.emit_prologue
= emit_prologue
;
3829 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
3831 /* Set opcode actions */
3832 lp_set_default_actions_cpu(&bld
.bld_base
);
3834 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
3835 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
3836 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
3837 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
3838 bld
.bld_base
.op_actions
[TGSI_OPCODE_CASE
].emit
= case_emit
;
3839 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
3840 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
3841 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
3842 bld
.bld_base
.op_actions
[TGSI_OPCODE_DEFAULT
].emit
= default_emit
;
3843 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
3844 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
3845 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
3846 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
3847 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSWITCH
].emit
= endswitch_emit
;
3848 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
3849 bld
.bld_base
.op_actions
[TGSI_OPCODE_UIF
].emit
= uif_emit
;
3850 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL_IF
].emit
= kill_if_emit
;
3851 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL
].emit
= kill_emit
;
3852 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
3853 bld
.bld_base
.op_actions
[TGSI_OPCODE_SWITCH
].emit
= switch_emit
;
3854 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
3855 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
3856 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
3857 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
3858 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
3859 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
3860 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
3861 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX2
].emit
= tex2_emit
;
3862 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB2
].emit
= txb2_emit
;
3863 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL2
].emit
= txl2_emit
;
3864 bld
.bld_base
.op_actions
[TGSI_OPCODE_TG4
].emit
= tg4_emit
;
3865 /* DX10 sampling ops */
3866 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE
].emit
= sample_emit
;
3867 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_B
].emit
= sample_b_emit
;
3868 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C
].emit
= sample_c_emit
;
3869 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C_LZ
].emit
= sample_c_lz_emit
;
3870 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_D
].emit
= sample_d_emit
;
3871 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I
].emit
= sample_i_emit
;
3872 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I_MS
].emit
= sample_i_emit
;
3873 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_L
].emit
= sample_l_emit
;
3874 bld
.bld_base
.op_actions
[TGSI_OPCODE_SVIEWINFO
].emit
= sviewinfo_emit
;
3877 /* There's no specific value for this because it should always
3878 * be set, but apps using ext_geometry_shader4 quite often
3879 * were forgetting so we're using MAX_VERTEX_VARYING from
3880 * that spec even though we could debug_assert if it's not
3881 * set, but that's a lot uglier. */
3882 uint max_output_vertices
;
3884 /* inputs are always indirect with gs */
3885 bld
.indirect_files
|= (1 << TGSI_FILE_INPUT
);
3886 bld
.gs_iface
= gs_iface
;
3887 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_gs_input
;
3888 bld
.bld_base
.op_actions
[TGSI_OPCODE_EMIT
].emit
= emit_vertex
;
3889 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDPRIM
].emit
= end_primitive
;
3891 max_output_vertices
=
3892 info
->properties
[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
];
3893 if (!max_output_vertices
)
3894 max_output_vertices
= 32;
3896 bld
.max_output_vertices_vec
=
3897 lp_build_const_int_vec(gallivm
, bld
.bld_base
.int_bld
.type
,
3898 max_output_vertices
);
3901 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.int_bld
);
3903 bld
.system_values
= *system_values
;
3905 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
3908 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
3909 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
3910 debug_printf("11111111111111111111111111111 \n");
3911 tgsi_dump(tokens
, 0);
3912 lp_debug_dump_value(function
);
3913 debug_printf("2222222222222222222222222222 \n");
3917 LLVMModuleRef module
= LLVMGetGlobalParent(
3918 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
3919 LLVMDumpModule(module
);
3922 lp_exec_mask_fini(&bld
.exec_mask
);