1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "tgsi/tgsi_strings.h"
51 #include "lp_bld_tgsi_action.h"
52 #include "lp_bld_type.h"
53 #include "lp_bld_const.h"
54 #include "lp_bld_arit.h"
55 #include "lp_bld_bitarit.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_init.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_swizzle.h"
60 #include "lp_bld_flow.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_tgsi.h"
63 #include "lp_bld_limits.h"
64 #include "lp_bld_debug.h"
65 #include "lp_bld_printf.h"
66 #include "lp_bld_sample.h"
67 #include "lp_bld_struct.h"
69 /* SM 4.0 says that subroutines can nest 32 deep and
70 * we need one more for our main function */
71 #define LP_MAX_NUM_FUNCS 33
73 #define DUMP_GS_EMITS 0
76 * If non-zero, the generated LLVM IR will print intermediate results on every TGSI
80 * - take execution masks in consideration
81 * - debug control-flow instructions
83 #define DEBUG_EXECUTION 0
87 * Emit code to print a register value.
90 emit_dump_reg(struct gallivm_state
*gallivm
,
98 util_snprintf(buf
, sizeof buf
, " %s[%u].%c = ",
100 index
, "xyzw"[chan
]);
102 lp_build_print_value(gallivm
, buf
, value
);
106 * Return the context for the current function.
107 * (always 'main', if shader doesn't do any function calls)
109 static inline struct function_ctx
*
110 func_ctx(struct lp_exec_mask
*mask
)
112 assert(mask
->function_stack_size
> 0);
113 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
114 return &mask
->function_stack
[mask
->function_stack_size
- 1];
118 * Returns true if we're in a loop.
119 * It's global, meaning that it returns true even if there's
120 * no loop inside the current function, but we were inside
121 * a loop inside another function, from which this one was called.
123 static inline boolean
124 mask_has_loop(struct lp_exec_mask
*mask
)
127 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
128 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
129 if (ctx
->loop_stack_size
> 0)
136 * Returns true if we're inside a switch statement.
137 * It's global, meaning that it returns true even if there's
138 * no switch in the current function, but we were inside
139 * a switch inside another function, from which this one was called.
141 static inline boolean
142 mask_has_switch(struct lp_exec_mask
*mask
)
145 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
146 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
147 if (ctx
->switch_stack_size
> 0)
154 * Returns true if we're inside a conditional.
155 * It's global, meaning that it returns true even if there's
156 * no conditional in the current function, but we were inside
157 * a conditional inside another function, from which this one was called.
159 static inline boolean
160 mask_has_cond(struct lp_exec_mask
*mask
)
163 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
164 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
165 if (ctx
->cond_stack_size
> 0)
173 * Initialize a function context at the specified index.
176 lp_exec_mask_function_init(struct lp_exec_mask
*mask
, int function_idx
)
178 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
179 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
180 struct function_ctx
*ctx
= &mask
->function_stack
[function_idx
];
182 ctx
->cond_stack_size
= 0;
183 ctx
->loop_stack_size
= 0;
184 ctx
->switch_stack_size
= 0;
186 if (function_idx
== 0) {
187 ctx
->ret_mask
= mask
->ret_mask
;
190 ctx
->loop_limiter
= lp_build_alloca(mask
->bld
->gallivm
,
191 int_type
, "looplimiter");
194 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
198 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
201 mask
->has_mask
= FALSE
;
202 mask
->ret_in_main
= FALSE
;
203 /* For the main function */
204 mask
->function_stack_size
= 1;
206 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
207 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
=
208 mask
->cond_mask
= mask
->switch_mask
=
209 LLVMConstAllOnes(mask
->int_vec_type
);
211 mask
->function_stack
= CALLOC(LP_MAX_NUM_FUNCS
,
212 sizeof(mask
->function_stack
[0]));
213 lp_exec_mask_function_init(mask
, 0);
217 lp_exec_mask_fini(struct lp_exec_mask
*mask
)
219 FREE(mask
->function_stack
);
222 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
224 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
225 boolean has_loop_mask
= mask_has_loop(mask
);
226 boolean has_cond_mask
= mask_has_cond(mask
);
227 boolean has_switch_mask
= mask_has_switch(mask
);
228 boolean has_ret_mask
= mask
->function_stack_size
> 1 ||
232 /*for loops we need to update the entire mask at runtime */
234 assert(mask
->break_mask
);
235 tmp
= LLVMBuildAnd(builder
,
239 mask
->exec_mask
= LLVMBuildAnd(builder
,
244 mask
->exec_mask
= mask
->cond_mask
;
246 if (has_switch_mask
) {
247 mask
->exec_mask
= LLVMBuildAnd(builder
,
254 mask
->exec_mask
= LLVMBuildAnd(builder
,
260 mask
->has_mask
= (has_cond_mask
||
266 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
269 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
270 struct function_ctx
*ctx
= func_ctx(mask
);
272 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
) {
273 ctx
->cond_stack_size
++;
276 if (ctx
->cond_stack_size
== 0 && mask
->function_stack_size
== 1) {
277 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
279 ctx
->cond_stack
[ctx
->cond_stack_size
++] = mask
->cond_mask
;
280 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
281 mask
->cond_mask
= LLVMBuildAnd(builder
,
285 lp_exec_mask_update(mask
);
288 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
290 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
291 struct function_ctx
*ctx
= func_ctx(mask
);
292 LLVMValueRef prev_mask
;
293 LLVMValueRef inv_mask
;
295 assert(ctx
->cond_stack_size
);
296 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
298 prev_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
- 1];
299 if (ctx
->cond_stack_size
== 1 && mask
->function_stack_size
== 1) {
300 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
303 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
305 mask
->cond_mask
= LLVMBuildAnd(builder
,
308 lp_exec_mask_update(mask
);
311 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
313 struct function_ctx
*ctx
= func_ctx(mask
);
314 assert(ctx
->cond_stack_size
);
315 --ctx
->cond_stack_size
;
316 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
318 mask
->cond_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
];
319 lp_exec_mask_update(mask
);
322 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
324 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
325 struct function_ctx
*ctx
= func_ctx(mask
);
327 if (ctx
->loop_stack_size
>= LP_MAX_TGSI_NESTING
) {
328 ++ctx
->loop_stack_size
;
332 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
334 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_LOOP
;
336 ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
= ctx
->loop_block
;
337 ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
338 ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
= mask
->break_mask
;
339 ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
= ctx
->break_var
;
340 ++ctx
->loop_stack_size
;
342 ctx
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
343 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
345 ctx
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
347 LLVMBuildBr(builder
, ctx
->loop_block
);
348 LLVMPositionBuilderAtEnd(builder
, ctx
->loop_block
);
350 mask
->break_mask
= LLVMBuildLoad(builder
, ctx
->break_var
, "");
352 lp_exec_mask_update(mask
);
355 static void lp_exec_break(struct lp_exec_mask
*mask
,
356 struct lp_build_tgsi_context
* bld_base
)
358 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
359 struct function_ctx
*ctx
= func_ctx(mask
);
361 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
362 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
366 mask
->break_mask
= LLVMBuildAnd(builder
,
368 exec_mask
, "break_full");
371 enum tgsi_opcode opcode
=
372 bld_base
->instructions
[bld_base
->pc
+ 1].Instruction
.Opcode
;
373 boolean break_always
= (opcode
== TGSI_OPCODE_ENDSWITCH
||
374 opcode
== TGSI_OPCODE_CASE
);
377 if (ctx
->switch_in_default
) {
379 * stop default execution but only if this is an unconditional switch.
380 * (The condition here is not perfect since dead code after break is
381 * allowed but should be sufficient since false negatives are just
382 * unoptimized - so we don't have to pre-evaluate that).
384 if(break_always
&& ctx
->switch_pc
) {
385 bld_base
->pc
= ctx
->switch_pc
;
391 mask
->switch_mask
= LLVMConstNull(mask
->bld
->int_vec_type
);
394 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
397 mask
->switch_mask
= LLVMBuildAnd(builder
,
399 exec_mask
, "break_switch");
403 lp_exec_mask_update(mask
);
406 static void lp_exec_continue(struct lp_exec_mask
*mask
)
408 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
409 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
413 mask
->cont_mask
= LLVMBuildAnd(builder
,
417 lp_exec_mask_update(mask
);
421 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
422 struct lp_exec_mask
*mask
)
424 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
425 struct function_ctx
*ctx
= func_ctx(mask
);
426 LLVMBasicBlockRef endloop
;
427 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
428 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
429 mask
->bld
->type
.width
*
430 mask
->bld
->type
.length
);
431 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
433 assert(mask
->break_mask
);
436 assert(ctx
->loop_stack_size
);
437 if (ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
438 --ctx
->loop_stack_size
;
443 * Restore the cont_mask, but don't pop
445 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
- 1].cont_mask
;
446 lp_exec_mask_update(mask
);
449 * Unlike the continue mask, the break_mask must be preserved across loop
452 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
454 /* Decrement the loop limiter */
455 limiter
= LLVMBuildLoad(builder
, ctx
->loop_limiter
, "");
457 limiter
= LLVMBuildSub(
460 LLVMConstInt(int_type
, 1, false),
463 LLVMBuildStore(builder
, limiter
, ctx
->loop_limiter
);
465 /* i1cond = (mask != 0) */
466 i1cond
= LLVMBuildICmp(
469 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
470 LLVMConstNull(reg_type
), "i1cond");
472 /* i2cond = (looplimiter > 0) */
473 i2cond
= LLVMBuildICmp(
477 LLVMConstNull(int_type
), "i2cond");
479 /* if( i1cond && i2cond ) */
480 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
482 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
484 LLVMBuildCondBr(builder
,
485 icond
, ctx
->loop_block
, endloop
);
487 LLVMPositionBuilderAtEnd(builder
, endloop
);
489 assert(ctx
->loop_stack_size
);
490 --ctx
->loop_stack_size
;
491 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
;
492 mask
->break_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
;
493 ctx
->loop_block
= ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
;
494 ctx
->break_var
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
;
495 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+
496 ctx
->switch_stack_size
];
498 lp_exec_mask_update(mask
);
501 static void lp_exec_switch(struct lp_exec_mask
*mask
,
502 LLVMValueRef switchval
)
504 struct function_ctx
*ctx
= func_ctx(mask
);
506 if (ctx
->switch_stack_size
>= LP_MAX_TGSI_NESTING
||
507 ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
508 ctx
->switch_stack_size
++;
512 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
514 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_SWITCH
;
516 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
= mask
->switch_mask
;
517 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
= ctx
->switch_val
;
518 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
= ctx
->switch_mask_default
;
519 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
= ctx
->switch_in_default
;
520 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
= ctx
->switch_pc
;
521 ctx
->switch_stack_size
++;
523 mask
->switch_mask
= LLVMConstNull(mask
->int_vec_type
);
524 ctx
->switch_val
= switchval
;
525 ctx
->switch_mask_default
= LLVMConstNull(mask
->int_vec_type
);
526 ctx
->switch_in_default
= false;
529 lp_exec_mask_update(mask
);
532 static void lp_exec_endswitch(struct lp_exec_mask
*mask
,
533 struct lp_build_tgsi_context
* bld_base
)
535 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
536 struct function_ctx
*ctx
= func_ctx(mask
);
538 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
539 ctx
->switch_stack_size
--;
543 /* check if there's deferred default if so do it now */
544 if (ctx
->switch_pc
&& !ctx
->switch_in_default
) {
545 LLVMValueRef prevmask
, defaultmask
;
547 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
548 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
549 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
550 ctx
->switch_in_default
= true;
552 lp_exec_mask_update(mask
);
554 assert(bld_base
->instructions
[ctx
->switch_pc
- 1].Instruction
.Opcode
==
555 TGSI_OPCODE_DEFAULT
);
557 tmp_pc
= bld_base
->pc
;
558 bld_base
->pc
= ctx
->switch_pc
;
560 * re-purpose switch_pc to point to here again, since we stop execution of
561 * the deferred default after next break.
563 ctx
->switch_pc
= tmp_pc
- 1;
568 else if (ctx
->switch_pc
&& ctx
->switch_in_default
) {
569 assert(bld_base
->pc
== ctx
->switch_pc
+ 1);
572 ctx
->switch_stack_size
--;
573 mask
->switch_mask
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
;
574 ctx
->switch_val
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
;
575 ctx
->switch_mask_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
;
576 ctx
->switch_in_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
;
577 ctx
->switch_pc
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
;
579 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
];
581 lp_exec_mask_update(mask
);
584 static void lp_exec_case(struct lp_exec_mask
*mask
,
585 LLVMValueRef caseval
)
587 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
588 struct function_ctx
*ctx
= func_ctx(mask
);
590 LLVMValueRef casemask
, prevmask
;
592 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
596 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
597 if (!ctx
->switch_in_default
) {
598 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
599 casemask
= lp_build_cmp(mask
->bld
, PIPE_FUNC_EQUAL
, caseval
, ctx
->switch_val
);
600 ctx
->switch_mask_default
= LLVMBuildOr(builder
, casemask
,
601 ctx
->switch_mask_default
, "sw_default_mask");
602 casemask
= LLVMBuildOr(builder
, casemask
, mask
->switch_mask
, "");
603 mask
->switch_mask
= LLVMBuildAnd(builder
, casemask
, prevmask
, "sw_mask");
605 lp_exec_mask_update(mask
);
610 * Analyse default statement in a switch.
611 * \return true if default is last statement, false otherwise
612 * \param default_pc_start contains pc of instruction to jump to
613 * if default wasn't last but there's no
614 * fallthrough into default.
616 static boolean
default_analyse_is_last(struct lp_exec_mask
*mask
,
617 struct lp_build_tgsi_context
* bld_base
,
618 int *default_pc_start
)
620 unsigned pc
= bld_base
->pc
;
621 struct function_ctx
*ctx
= func_ctx(mask
);
622 int curr_switch_stack
= ctx
->switch_stack_size
;
624 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
628 /* skip over case statements which are together with default */
629 while (bld_base
->instructions
[pc
].Instruction
.Opcode
== TGSI_OPCODE_CASE
) {
633 while (pc
!= ~0u && pc
< bld_base
->num_instructions
) {
634 enum tgsi_opcode opcode
= bld_base
->instructions
[pc
].Instruction
.Opcode
;
636 case TGSI_OPCODE_CASE
:
637 if (curr_switch_stack
== ctx
->switch_stack_size
) {
638 *default_pc_start
= pc
- 1;
642 case TGSI_OPCODE_SWITCH
:
645 case TGSI_OPCODE_ENDSWITCH
:
646 if (curr_switch_stack
== ctx
->switch_stack_size
) {
647 *default_pc_start
= pc
- 1;
657 /* should never arrive here */
662 static void lp_exec_default(struct lp_exec_mask
*mask
,
663 struct lp_build_tgsi_context
* bld_base
)
665 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
666 struct function_ctx
*ctx
= func_ctx(mask
);
669 boolean default_is_last
;
671 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
676 * This is a messy opcode, because it may not be always at the end and
677 * there can be fallthrough in and out of it.
680 default_is_last
= default_analyse_is_last(mask
, bld_base
, &default_exec_pc
);
682 * If it is last statement in switch (note that case statements appearing
683 * "at the same time" as default don't change that) everything is just fine,
684 * update switch mask and go on. This means we can handle default with
685 * fallthrough INTO it without overhead, if it is last.
687 if (default_is_last
) {
688 LLVMValueRef prevmask
, defaultmask
;
689 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
690 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
691 defaultmask
= LLVMBuildOr(builder
, defaultmask
, mask
->switch_mask
, "");
692 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
693 ctx
->switch_in_default
= true;
695 lp_exec_mask_update(mask
);
699 * Technically, "case" immediately before default isn't really a
700 * fallthrough, however we still have to count them as such as we
701 * already have updated the masks.
702 * If that happens in practice could add a switch optimizer pass
703 * which just gets rid of all case statements appearing together with
704 * default (or could do switch analysis at switch start time instead).
706 enum tgsi_opcode opcode
=
707 bld_base
->instructions
[bld_base
->pc
- 1].Instruction
.Opcode
;
708 boolean ft_into
= (opcode
!= TGSI_OPCODE_BRK
&&
709 opcode
!= TGSI_OPCODE_SWITCH
);
711 * If it is not last statement and there was no fallthrough into it,
712 * we record the PC and continue execution at next case (again, those
713 * case encountered at the same time don't count). At endswitch
714 * time, we update switchmask, and go back executing the code we skipped
715 * until the next break (possibly re-executing some code with changed mask
716 * if there was a fallthrough out of default).
717 * Finally, if it is not last statement and there was a fallthrough into it,
718 * do the same as with the former case, except instead of skipping the code
719 * just execute it without updating the mask, then go back and re-execute.
721 ctx
->switch_pc
= bld_base
->pc
;
723 bld_base
->pc
= default_exec_pc
;
729 /* stores val into an address pointed to by dst_ptr.
730 * mask->exec_mask is used to figure out which bits of val
731 * should be stored into the address
732 * (0 means don't store this bit, 1 means do store).
734 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
735 struct lp_build_context
*bld_store
,
737 LLVMValueRef dst_ptr
)
739 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
740 LLVMValueRef exec_mask
= mask
->has_mask
? mask
->exec_mask
: NULL
;
742 assert(lp_check_value(bld_store
->type
, val
));
743 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr
)) == LLVMPointerTypeKind
);
744 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr
)) == LLVMTypeOf(val
) ||
745 LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(dst_ptr
))) == LLVMArrayTypeKind
);
748 LLVMValueRef res
, dst
;
750 dst
= LLVMBuildLoad(builder
, dst_ptr
, "");
751 res
= lp_build_select(bld_store
, exec_mask
, val
, dst
);
752 LLVMBuildStore(builder
, res
, dst_ptr
);
754 LLVMBuildStore(builder
, val
, dst_ptr
);
757 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
761 if (mask
->function_stack_size
>= LP_MAX_NUM_FUNCS
) {
765 lp_exec_mask_function_init(mask
, mask
->function_stack_size
);
766 mask
->function_stack
[mask
->function_stack_size
].pc
= *pc
;
767 mask
->function_stack
[mask
->function_stack_size
].ret_mask
= mask
->ret_mask
;
768 mask
->function_stack_size
++;
772 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
774 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
775 struct function_ctx
*ctx
= func_ctx(mask
);
776 LLVMValueRef exec_mask
;
778 if (ctx
->cond_stack_size
== 0 &&
779 ctx
->loop_stack_size
== 0 &&
780 ctx
->switch_stack_size
== 0 &&
781 mask
->function_stack_size
== 1) {
782 /* returning from main() */
787 if (mask
->function_stack_size
== 1) {
789 * This requires special handling since we need to ensure
790 * we don't drop the mask even if we have no call stack
791 * (e.g. after a ret in a if clause after the endif)
793 mask
->ret_in_main
= TRUE
;
796 exec_mask
= LLVMBuildNot(builder
,
800 mask
->ret_mask
= LLVMBuildAnd(builder
,
802 exec_mask
, "ret_full");
804 lp_exec_mask_update(mask
);
807 static void lp_exec_mask_bgnsub(struct lp_exec_mask
*mask
)
811 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
813 struct function_ctx
*ctx
;
815 assert(mask
->function_stack_size
> 1);
816 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
818 ctx
= func_ctx(mask
);
819 mask
->function_stack_size
--;
822 mask
->ret_mask
= ctx
->ret_mask
;
824 lp_exec_mask_update(mask
);
829 get_file_ptr(struct lp_build_tgsi_soa_context
*bld
,
834 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
835 LLVMValueRef (*array_of_vars
)[TGSI_NUM_CHANNELS
];
836 LLVMValueRef var_of_array
;
839 case TGSI_FILE_TEMPORARY
:
840 array_of_vars
= bld
->temps
;
841 var_of_array
= bld
->temps_array
;
843 case TGSI_FILE_OUTPUT
:
844 array_of_vars
= bld
->outputs
;
845 var_of_array
= bld
->outputs_array
;
854 if (bld
->indirect_files
& (1 << file
)) {
855 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
856 if (LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(var_of_array
))) == LLVMArrayTypeKind
) {
858 gep
[0] = lp_build_const_int32(bld
->bld_base
.base
.gallivm
, 0);
860 return LLVMBuildGEP(builder
, var_of_array
, gep
, 2, "");
862 return LLVMBuildGEP(builder
, var_of_array
, &lindex
, 1, "");
866 assert(index
<= bld
->bld_base
.info
->file_max
[file
]);
867 return array_of_vars
[index
][chan
];
873 * Return pointer to a temporary register channel (src or dest).
874 * Note that indirect addressing cannot be handled here.
875 * \param index which temporary register
876 * \param chan which channel of the temp register.
879 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
883 return get_file_ptr(bld
, TGSI_FILE_TEMPORARY
, index
, chan
);
887 * Return pointer to a output register channel (src or dest).
888 * Note that indirect addressing cannot be handled here.
889 * \param index which output register
890 * \param chan which channel of the output register.
893 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
897 return get_file_ptr(bld
, TGSI_FILE_OUTPUT
, index
, chan
);
901 * If we have indirect addressing in outputs copy our alloca array
902 * to the outputs slots specified by the caller to make sure
903 * our outputs are delivered consistently via the same interface.
906 gather_outputs(struct lp_build_tgsi_soa_context
* bld
)
908 if ((bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
909 unsigned index
, chan
;
910 assert(bld
->bld_base
.info
->num_outputs
<=
911 bld
->bld_base
.info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
912 for (index
= 0; index
< bld
->bld_base
.info
->num_outputs
; ++index
) {
913 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
914 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
922 * XXX the lp_build_gather() function should be capable of doing this
923 * with a little work.
926 build_gather(struct lp_build_tgsi_context
*bld_base
,
927 LLVMValueRef base_ptr
,
928 LLVMValueRef indexes
,
929 LLVMValueRef overflow_mask
,
930 LLVMValueRef indexes2
)
932 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
933 LLVMBuilderRef builder
= gallivm
->builder
;
934 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
935 struct lp_build_context
*bld
= &bld_base
->base
;
940 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
944 * overflow_mask is a vector telling us which channels
945 * in the vector overflowed. We use the overflow behavior for
946 * constant buffers which is defined as:
947 * Out of bounds access to constant buffer returns 0 in all
948 * components. Out of bounds behavior is always with respect
949 * to the size of the buffer bound at that slot.
954 * We avoid per-element control flow here (also due to llvm going crazy,
955 * though I suspect it's better anyway since overflow is likely rare).
956 * Note that since we still fetch from buffers even if num_elements was
957 * zero (in this case we'll fetch from index zero) the jit func callers
958 * MUST provide valid fake constant buffers of size 4x32 (the values do
959 * not matter), otherwise we'd still need (not per element though)
962 indexes
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes
);
964 indexes2
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes2
);
968 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
970 for (i
= 0; i
< bld
->type
.length
* (indexes2
? 2 : 1); i
++) {
973 LLVMValueRef scalar_ptr
, scalar
;
975 di
= lp_build_const_int32(bld
->gallivm
, i
);
977 si
= lp_build_const_int32(bld
->gallivm
, i
>> 1);
981 if (indexes2
&& (i
& 1)) {
982 index
= LLVMBuildExtractElement(builder
,
985 index
= LLVMBuildExtractElement(builder
,
988 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
989 &index
, 1, "gather_ptr");
990 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
992 res
= LLVMBuildInsertElement(builder
, res
, scalar
, di
, "");
997 res
= LLVMBuildBitCast(builder
, res
, bld_base
->dbl_bld
.vec_type
, "");
998 overflow_mask
= LLVMBuildSExt(builder
, overflow_mask
,
999 bld_base
->dbl_bld
.int_vec_type
, "");
1000 res
= lp_build_select(&bld_base
->dbl_bld
, overflow_mask
,
1001 bld_base
->dbl_bld
.zero
, res
);
1003 res
= lp_build_select(bld
, overflow_mask
, bld
->zero
, res
);
1011 * Scatter/store vector.
1014 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
1015 LLVMValueRef base_ptr
,
1016 LLVMValueRef indexes
,
1017 LLVMValueRef values
,
1018 struct lp_exec_mask
*mask
)
1020 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1021 LLVMBuilderRef builder
= gallivm
->builder
;
1023 LLVMValueRef pred
= mask
->has_mask
? mask
->exec_mask
: NULL
;
1026 * Loop over elements of index_vec, store scalar value.
1028 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1029 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1030 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
1031 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
1032 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
1033 LLVMValueRef scalar_pred
= pred
?
1034 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
1037 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
1038 ii
, val
, index
, scalar_ptr
);
1041 LLVMValueRef real_val
, dst_val
;
1042 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1043 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
1044 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
1047 LLVMBuildStore(builder
, val
, scalar_ptr
);
1054 * Read the current value of the ADDR register, convert the floats to
1055 * ints, add the base index and return the vector of offsets.
1056 * The offsets will be used to index into the constant buffer or
1057 * temporary register file.
1060 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
1061 unsigned reg_file
, unsigned reg_index
,
1062 const struct tgsi_ind_register
*indirect_reg
)
1064 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1065 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
1066 /* always use X component of address register */
1067 unsigned swizzle
= indirect_reg
->Swizzle
;
1070 LLVMValueRef max_index
;
1073 assert(bld
->indirect_files
& (1 << reg_file
));
1075 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
1077 assert(swizzle
< 4);
1078 switch (indirect_reg
->File
) {
1079 case TGSI_FILE_ADDRESS
:
1080 rel
= LLVMBuildLoad(builder
,
1081 bld
->addr
[indirect_reg
->Index
][swizzle
],
1083 /* ADDR LLVM values already have LLVM integer type. */
1085 case TGSI_FILE_TEMPORARY
:
1086 rel
= lp_get_temp_ptr_soa(bld
, indirect_reg
->Index
, swizzle
);
1087 rel
= LLVMBuildLoad(builder
, rel
, "load temp reg");
1088 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1089 * value actually stored is expected to be an integer */
1090 rel
= LLVMBuildBitCast(builder
, rel
, uint_bld
->vec_type
, "");
1094 rel
= uint_bld
->zero
;
1097 index
= lp_build_add(uint_bld
, base
, rel
);
1100 * emit_fetch_constant handles constant buffer overflow so this code
1101 * is pointless for them.
1102 * Furthermore the D3D10 spec in section 6.5 says:
1103 * If the constant buffer bound to a slot is larger than the size
1104 * declared in the shader for that slot, implementations are allowed
1105 * to return incorrect data (not necessarily 0) for indices that are
1106 * larger than the declared size but smaller than the buffer size.
1108 if (reg_file
!= TGSI_FILE_CONSTANT
) {
1109 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
1111 bld
->bld_base
.info
->file_max
[reg_file
]);
1113 assert(!uint_bld
->type
.sign
);
1114 index
= lp_build_min(uint_bld
, index
, max_index
);
1120 static struct lp_build_context
*
1121 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
1122 enum tgsi_opcode_type stype
)
1124 struct lp_build_context
*bld_fetch
;
1127 case TGSI_TYPE_FLOAT
:
1128 case TGSI_TYPE_UNTYPED
:
1129 bld_fetch
= &bld_base
->base
;
1131 case TGSI_TYPE_UNSIGNED
:
1132 bld_fetch
= &bld_base
->uint_bld
;
1134 case TGSI_TYPE_SIGNED
:
1135 bld_fetch
= &bld_base
->int_bld
;
1137 case TGSI_TYPE_DOUBLE
:
1138 bld_fetch
= &bld_base
->dbl_bld
;
1140 case TGSI_TYPE_UNSIGNED64
:
1141 bld_fetch
= &bld_base
->uint64_bld
;
1143 case TGSI_TYPE_SIGNED64
:
1144 bld_fetch
= &bld_base
->int64_bld
;
1146 case TGSI_TYPE_VOID
:
1156 get_soa_array_offsets(struct lp_build_context
*uint_bld
,
1157 LLVMValueRef indirect_index
,
1158 unsigned chan_index
,
1159 boolean need_perelement_offset
)
1161 struct gallivm_state
*gallivm
= uint_bld
->gallivm
;
1162 LLVMValueRef chan_vec
=
1163 lp_build_const_int_vec(uint_bld
->gallivm
, uint_bld
->type
, chan_index
);
1164 LLVMValueRef length_vec
=
1165 lp_build_const_int_vec(gallivm
, uint_bld
->type
, uint_bld
->type
.length
);
1166 LLVMValueRef index_vec
;
1168 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1169 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1170 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1171 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1173 if (need_perelement_offset
) {
1174 LLVMValueRef pixel_offsets
;
1176 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1177 pixel_offsets
= uint_bld
->undef
;
1178 for (i
= 0; i
< uint_bld
->type
.length
; i
++) {
1179 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1180 pixel_offsets
= LLVMBuildInsertElement(gallivm
->builder
, pixel_offsets
,
1183 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1189 emit_fetch_constant(
1190 struct lp_build_tgsi_context
* bld_base
,
1191 const struct tgsi_full_src_register
* reg
,
1192 enum tgsi_opcode_type stype
,
1193 unsigned swizzle_in
)
1195 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1196 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1197 LLVMBuilderRef builder
= gallivm
->builder
;
1198 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
1199 unsigned dimension
= 0;
1200 LLVMValueRef consts_ptr
;
1201 LLVMValueRef num_consts
;
1203 unsigned swizzle
= swizzle_in
& 0xffff;
1205 /* XXX: Handle fetching xyzw components as a vector */
1206 assert(swizzle
!= ~0u);
1208 if (reg
->Register
.Dimension
) {
1209 assert(!reg
->Dimension
.Indirect
);
1210 dimension
= reg
->Dimension
.Index
;
1211 assert(dimension
< LP_MAX_TGSI_CONST_BUFFERS
);
1214 consts_ptr
= bld
->consts
[dimension
];
1215 num_consts
= bld
->consts_sizes
[dimension
];
1217 if (reg
->Register
.Indirect
) {
1218 LLVMValueRef indirect_index
;
1219 LLVMValueRef swizzle_vec
=
1220 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
1221 LLVMValueRef index_vec
; /* index into the const buffer */
1222 LLVMValueRef overflow_mask
;
1223 LLVMValueRef index_vec2
= NULL
;
1225 indirect_index
= get_indirect_index(bld
,
1227 reg
->Register
.Index
,
1230 /* All fetches are from the same constant buffer, so
1231 * we need to propagate the size to a vector to do a
1232 * vector comparison */
1233 num_consts
= lp_build_broadcast_scalar(uint_bld
, num_consts
);
1234 /* Construct a boolean vector telling us which channels
1235 * overflow the bound constant buffer */
1236 overflow_mask
= lp_build_compare(gallivm
, uint_bld
->type
, PIPE_FUNC_GEQUAL
,
1237 indirect_index
, num_consts
);
1239 /* index_vec = indirect_index * 4 + swizzle */
1240 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1241 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
1243 if (tgsi_type_is_64bit(stype
)) {
1244 LLVMValueRef swizzle_vec2
;
1245 swizzle_vec2
= lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle_in
>> 16);
1246 index_vec2
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1247 index_vec2
= lp_build_add(uint_bld
, index_vec2
, swizzle_vec2
);
1249 /* Gather values from the constant buffer */
1250 res
= build_gather(bld_base
, consts_ptr
, index_vec
, overflow_mask
, index_vec2
);
1253 LLVMValueRef index
; /* index into the const buffer */
1254 LLVMValueRef scalar
, scalar_ptr
;
1255 struct lp_build_context
*bld_broad
= &bld_base
->base
;
1256 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1258 scalar_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1261 if (tgsi_type_is_64bit(stype
) && ((swizzle_in
>> 16) != swizzle
+ 1)) {
1263 LLVMValueRef scalar2
, scalar2_ptr
;
1264 LLVMValueRef shuffles
[2];
1265 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1267 scalar2_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1270 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1271 scalar2
= LLVMBuildLoad(builder
, scalar2_ptr
, "");
1272 shuffles
[0] = lp_build_const_int32(gallivm
, 0);
1273 shuffles
[1] = lp_build_const_int32(gallivm
, 1);
1275 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
1276 res
= LLVMBuildInsertElement(builder
, res
, scalar
, shuffles
[0], "");
1277 res
= LLVMBuildInsertElement(builder
, res
, scalar2
, shuffles
[1], "");
1279 if (stype
== TGSI_TYPE_DOUBLE
) {
1280 LLVMTypeRef dptr_type
= LLVMPointerType(LLVMDoubleTypeInContext(gallivm
->context
), 0);
1281 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, dptr_type
, "");
1282 bld_broad
= &bld_base
->dbl_bld
;
1283 } else if (stype
== TGSI_TYPE_UNSIGNED64
) {
1284 LLVMTypeRef u64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1285 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, u64ptr_type
, "");
1286 bld_broad
= &bld_base
->uint64_bld
;
1287 } else if (stype
== TGSI_TYPE_SIGNED64
) {
1288 LLVMTypeRef i64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1289 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, i64ptr_type
, "");
1290 bld_broad
= &bld_base
->int64_bld
;
1292 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1293 res
= lp_build_broadcast_scalar(bld_broad
, scalar
);
1298 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| stype
== TGSI_TYPE_DOUBLE
|| stype
== TGSI_TYPE_SIGNED64
|| stype
== TGSI_TYPE_UNSIGNED64
) {
1299 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1300 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1307 * Fetch 64-bit values from two separate channels.
1308 * 64-bit values are stored split across two channels, like xy and zw.
1309 * This function creates a set of vec_length*2 floats,
1310 * extracts the values from the two channels,
1311 * puts them in the correct place, then casts to vec_length 64-bits.
1315 struct lp_build_tgsi_context
* bld_base
,
1316 enum tgsi_opcode_type stype
,
1318 LLVMValueRef input2
)
1320 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1321 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1322 LLVMBuilderRef builder
= gallivm
->builder
;
1324 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1326 LLVMValueRef shuffles
[2 * (LP_MAX_VECTOR_WIDTH
/32)];
1327 int len
= bld_base
->base
.type
.length
* 2;
1328 assert(len
<= (2 * (LP_MAX_VECTOR_WIDTH
/32)));
1330 for (i
= 0; i
< bld_base
->base
.type
.length
* 2; i
+=2) {
1331 shuffles
[i
] = lp_build_const_int32(gallivm
, i
/ 2);
1332 shuffles
[i
+ 1] = lp_build_const_int32(gallivm
, i
/ 2 + bld_base
->base
.type
.length
);
1334 res
= LLVMBuildShuffleVector(builder
, input
, input2
, LLVMConstVector(shuffles
, len
), "");
1336 return LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1340 emit_fetch_immediate(
1341 struct lp_build_tgsi_context
* bld_base
,
1342 const struct tgsi_full_src_register
* reg
,
1343 enum tgsi_opcode_type stype
,
1344 unsigned swizzle_in
)
1346 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1347 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1348 LLVMBuilderRef builder
= gallivm
->builder
;
1349 LLVMValueRef res
= NULL
;
1350 unsigned swizzle
= swizzle_in
& 0xffff;
1352 if (bld
->use_immediates_array
|| reg
->Register
.Indirect
) {
1353 LLVMValueRef imms_array
;
1354 LLVMTypeRef fptr_type
;
1356 /* cast imms_array pointer to float* */
1357 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1358 imms_array
= LLVMBuildBitCast(builder
, bld
->imms_array
, fptr_type
, "");
1360 if (reg
->Register
.Indirect
) {
1361 LLVMValueRef indirect_index
;
1362 LLVMValueRef index_vec
; /* index into the immediate register array */
1363 LLVMValueRef index_vec2
= NULL
;
1364 indirect_index
= get_indirect_index(bld
,
1366 reg
->Register
.Index
,
1369 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1370 * immediates are stored as full vectors (FIXME??? - might be better
1371 * to store them the same as constants) but all elements are the same
1374 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1378 if (tgsi_type_is_64bit(stype
))
1379 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1383 /* Gather values from the immediate register array */
1384 res
= build_gather(bld_base
, imms_array
, index_vec
, NULL
, index_vec2
);
1386 LLVMValueRef gep
[2];
1387 gep
[0] = lp_build_const_int32(gallivm
, 0);
1388 gep
[1] = lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1389 LLVMValueRef imms_ptr
= LLVMBuildGEP(builder
,
1390 bld
->imms_array
, gep
, 2, "");
1391 res
= LLVMBuildLoad(builder
, imms_ptr
, "");
1393 if (tgsi_type_is_64bit(stype
)) {
1394 LLVMValueRef imms_ptr2
;
1396 gep
[1] = lp_build_const_int32(gallivm
,
1397 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1398 imms_ptr2
= LLVMBuildGEP(builder
,
1399 bld
->imms_array
, gep
, 2, "");
1400 res2
= LLVMBuildLoad(builder
, imms_ptr2
, "");
1401 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1406 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
1407 if (tgsi_type_is_64bit(stype
))
1408 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->immediates
[reg
->Register
.Index
][swizzle_in
>> 16]);
1411 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1412 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1413 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1420 struct lp_build_tgsi_context
* bld_base
,
1421 const struct tgsi_full_src_register
* reg
,
1422 enum tgsi_opcode_type stype
,
1423 unsigned swizzle_in
)
1425 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1426 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1427 LLVMBuilderRef builder
= gallivm
->builder
;
1429 unsigned swizzle
= swizzle_in
& 0xffff;
1431 if (reg
->Register
.Indirect
) {
1432 LLVMValueRef indirect_index
;
1433 LLVMValueRef index_vec
; /* index into the input reg array */
1434 LLVMValueRef index_vec2
= NULL
;
1435 LLVMValueRef inputs_array
;
1436 LLVMTypeRef fptr_type
;
1438 indirect_index
= get_indirect_index(bld
,
1440 reg
->Register
.Index
,
1443 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1447 if (tgsi_type_is_64bit(stype
)) {
1448 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1453 /* cast inputs_array pointer to float* */
1454 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1455 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
, fptr_type
, "");
1457 /* Gather values from the input register array */
1458 res
= build_gather(bld_base
, inputs_array
, index_vec
, NULL
, index_vec2
);
1460 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
1461 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1462 reg
->Register
.Index
* 4 + swizzle
);
1463 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
1464 bld
->inputs_array
, &lindex
, 1, "");
1466 res
= LLVMBuildLoad(builder
, input_ptr
, "");
1467 if (tgsi_type_is_64bit(stype
)) {
1468 LLVMValueRef lindex1
;
1469 LLVMValueRef input_ptr2
;
1472 lindex1
= lp_build_const_int32(gallivm
,
1473 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1474 input_ptr2
= LLVMBuildGEP(builder
,
1475 bld
->inputs_array
, &lindex1
, 1, "");
1476 res2
= LLVMBuildLoad(builder
, input_ptr2
, "");
1477 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1481 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
1482 if (tgsi_type_is_64bit(stype
))
1483 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->inputs
[reg
->Register
.Index
][swizzle_in
>> 16]);
1489 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1490 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1491 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1499 emit_fetch_gs_input(
1500 struct lp_build_tgsi_context
* bld_base
,
1501 const struct tgsi_full_src_register
* reg
,
1502 enum tgsi_opcode_type stype
,
1503 unsigned swizzle_in
)
1505 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1506 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1507 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1508 LLVMBuilderRef builder
= gallivm
->builder
;
1509 LLVMValueRef attrib_index
= NULL
;
1510 LLVMValueRef vertex_index
= NULL
;
1511 unsigned swizzle
= swizzle_in
& 0xffff;
1512 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
);
1515 if (info
->input_semantic_name
[reg
->Register
.Index
] == TGSI_SEMANTIC_PRIMID
) {
1516 /* This is really a system value not a regular input */
1517 assert(!reg
->Register
.Indirect
);
1518 assert(!reg
->Dimension
.Indirect
);
1519 res
= bld
->system_values
.prim_id
;
1520 if (stype
!= TGSI_TYPE_UNSIGNED
&& stype
!= TGSI_TYPE_SIGNED
) {
1521 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1526 if (reg
->Register
.Indirect
) {
1527 attrib_index
= get_indirect_index(bld
,
1529 reg
->Register
.Index
,
1532 attrib_index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
);
1535 if (reg
->Dimension
.Indirect
) {
1536 vertex_index
= get_indirect_index(bld
,
1538 reg
->Dimension
.Index
,
1541 vertex_index
= lp_build_const_int32(gallivm
, reg
->Dimension
.Index
);
1544 res
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1545 reg
->Dimension
.Indirect
,
1547 reg
->Register
.Indirect
,
1552 if (tgsi_type_is_64bit(stype
)) {
1553 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle_in
>> 16);
1555 res2
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1556 reg
->Dimension
.Indirect
,
1558 reg
->Register
.Indirect
,
1562 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1563 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1564 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1565 } else if (stype
== TGSI_TYPE_SIGNED
) {
1566 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1573 emit_fetch_temporary(
1574 struct lp_build_tgsi_context
* bld_base
,
1575 const struct tgsi_full_src_register
* reg
,
1576 enum tgsi_opcode_type stype
,
1577 unsigned swizzle_in
)
1579 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1580 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1581 LLVMBuilderRef builder
= gallivm
->builder
;
1583 unsigned swizzle
= swizzle_in
& 0xffff;
1585 if (reg
->Register
.Indirect
) {
1586 LLVMValueRef indirect_index
;
1587 LLVMValueRef index_vec
, index_vec2
= NULL
; /* index into the temp reg array */
1588 LLVMValueRef temps_array
;
1589 LLVMTypeRef fptr_type
;
1591 indirect_index
= get_indirect_index(bld
,
1593 reg
->Register
.Index
,
1596 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1600 if (tgsi_type_is_64bit(stype
)) {
1601 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1607 /* cast temps_array pointer to float* */
1608 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1609 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1611 /* Gather values from the temporary register array */
1612 res
= build_gather(bld_base
, temps_array
, index_vec
, NULL
, index_vec2
);
1615 LLVMValueRef temp_ptr
;
1616 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
1617 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1619 if (tgsi_type_is_64bit(stype
)) {
1620 LLVMValueRef temp_ptr2
, res2
;
1622 temp_ptr2
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle_in
>> 16);
1623 res2
= LLVMBuildLoad(builder
, temp_ptr2
, "");
1624 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1628 if (stype
== TGSI_TYPE_SIGNED
||
1629 stype
== TGSI_TYPE_UNSIGNED
||
1630 stype
== TGSI_TYPE_DOUBLE
||
1631 stype
== TGSI_TYPE_SIGNED64
||
1632 stype
== TGSI_TYPE_UNSIGNED64
) {
1633 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1634 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1641 emit_fetch_system_value(
1642 struct lp_build_tgsi_context
* bld_base
,
1643 const struct tgsi_full_src_register
* reg
,
1644 enum tgsi_opcode_type stype
,
1645 unsigned swizzle_in
)
1647 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1648 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1649 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1650 LLVMBuilderRef builder
= gallivm
->builder
;
1652 enum tgsi_opcode_type atype
; // Actual type of the value
1654 assert(!reg
->Register
.Indirect
);
1656 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
1657 case TGSI_SEMANTIC_INSTANCEID
:
1658 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
1659 atype
= TGSI_TYPE_UNSIGNED
;
1662 case TGSI_SEMANTIC_VERTEXID
:
1663 res
= bld
->system_values
.vertex_id
;
1664 atype
= TGSI_TYPE_UNSIGNED
;
1667 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
1668 res
= bld
->system_values
.vertex_id_nobase
;
1669 atype
= TGSI_TYPE_UNSIGNED
;
1672 case TGSI_SEMANTIC_BASEVERTEX
:
1673 res
= bld
->system_values
.basevertex
;
1674 atype
= TGSI_TYPE_UNSIGNED
;
1677 case TGSI_SEMANTIC_PRIMID
:
1678 res
= bld
->system_values
.prim_id
;
1679 atype
= TGSI_TYPE_UNSIGNED
;
1682 case TGSI_SEMANTIC_INVOCATIONID
:
1683 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.invocation_id
);
1684 atype
= TGSI_TYPE_UNSIGNED
;
1688 assert(!"unexpected semantic in emit_fetch_system_value");
1689 res
= bld_base
->base
.zero
;
1690 atype
= TGSI_TYPE_FLOAT
;
1694 if (atype
!= stype
) {
1695 if (stype
== TGSI_TYPE_FLOAT
) {
1696 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1697 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1698 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1699 } else if (stype
== TGSI_TYPE_SIGNED
) {
1700 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1708 * Register fetch with derivatives.
1712 struct lp_build_tgsi_soa_context
*bld
,
1721 /* TODO: use interpolation coeffs for inputs */
1724 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
1727 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
1731 * store an array of vec-length 64-bit into two arrays of vec_length floats
1733 * value is d0, d1, d2, d3 etc.
1734 * each 64-bit has high and low pieces x, y
1735 * so gets stored into the separate channels as:
1736 * chan_ptr = d0.x, d1.x, d2.x, d3.x
1737 * chan_ptr2 = d0.y, d1.y, d2.y, d3.y
1740 emit_store_64bit_chan(struct lp_build_tgsi_context
*bld_base
,
1741 LLVMValueRef chan_ptr
, LLVMValueRef chan_ptr2
,
1744 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1745 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1746 LLVMBuilderRef builder
= gallivm
->builder
;
1747 struct lp_build_context
*float_bld
= &bld_base
->base
;
1749 LLVMValueRef temp
, temp2
;
1750 LLVMValueRef shuffles
[LP_MAX_VECTOR_WIDTH
/32];
1751 LLVMValueRef shuffles2
[LP_MAX_VECTOR_WIDTH
/32];
1753 for (i
= 0; i
< bld_base
->base
.type
.length
; i
++) {
1754 shuffles
[i
] = lp_build_const_int32(gallivm
, i
* 2);
1755 shuffles2
[i
] = lp_build_const_int32(gallivm
, (i
* 2) + 1);
1758 temp
= LLVMBuildShuffleVector(builder
, value
,
1759 LLVMGetUndef(LLVMTypeOf(value
)),
1760 LLVMConstVector(shuffles
,
1761 bld_base
->base
.type
.length
),
1763 temp2
= LLVMBuildShuffleVector(builder
, value
,
1764 LLVMGetUndef(LLVMTypeOf(value
)),
1765 LLVMConstVector(shuffles2
,
1766 bld_base
->base
.type
.length
),
1769 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp
, chan_ptr
);
1770 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp2
, chan_ptr2
);
1778 struct lp_build_tgsi_context
*bld_base
,
1779 const struct tgsi_full_instruction
*inst
,
1781 unsigned chan_index
,
1784 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1785 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1786 LLVMBuilderRef builder
= gallivm
->builder
;
1787 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
1788 struct lp_build_context
*float_bld
= &bld_base
->base
;
1789 struct lp_build_context
*int_bld
= &bld_base
->int_bld
;
1790 LLVMValueRef indirect_index
= NULL
;
1791 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
1796 * It is always assumed to be float.
1798 if (inst
->Instruction
.Saturate
) {
1799 assert(dtype
== TGSI_TYPE_FLOAT
||
1800 dtype
== TGSI_TYPE_UNTYPED
);
1801 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1802 value
= lp_build_clamp_zero_one_nanzero(float_bld
, value
);
1805 if (reg
->Register
.Indirect
) {
1807 * Currently the mesa/st doesn't generate indirect stores
1808 * to 64-bit values, it normally uses MOV to do indirect stores.
1810 assert(!tgsi_type_is_64bit(dtype
));
1811 indirect_index
= get_indirect_index(bld
,
1813 reg
->Register
.Index
,
1816 assert(reg
->Register
.Index
<=
1817 bld_base
->info
->file_max
[reg
->Register
.File
]);
1820 if (DEBUG_EXECUTION
) {
1821 emit_dump_reg(gallivm
, reg
->Register
.File
, reg
->Register
.Index
, chan_index
, value
);
1824 switch( reg
->Register
.File
) {
1825 case TGSI_FILE_OUTPUT
:
1826 /* Outputs are always stored as floats */
1827 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1829 if (reg
->Register
.Indirect
) {
1830 LLVMValueRef index_vec
; /* indexes into the output registers */
1831 LLVMValueRef outputs_array
;
1832 LLVMTypeRef fptr_type
;
1834 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1839 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1840 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
, fptr_type
, "");
1842 /* Scatter store values into output registers */
1843 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1847 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1850 if (tgsi_type_is_64bit(dtype
)) {
1851 LLVMValueRef out_ptr2
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1853 emit_store_64bit_chan(bld_base
, out_ptr
, out_ptr2
,
1856 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, out_ptr
);
1860 case TGSI_FILE_TEMPORARY
:
1861 /* Temporaries are always stored as floats */
1862 if (!tgsi_type_is_64bit(dtype
))
1863 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1865 value
= LLVMBuildBitCast(builder
, value
, LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2), "");
1867 if (reg
->Register
.Indirect
) {
1868 LLVMValueRef index_vec
; /* indexes into the temp registers */
1869 LLVMValueRef temps_array
;
1870 LLVMTypeRef fptr_type
;
1872 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1877 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1878 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1880 /* Scatter store values into temp registers */
1881 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1885 LLVMValueRef temp_ptr
;
1886 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, chan_index
);
1888 if (tgsi_type_is_64bit(dtype
)) {
1889 LLVMValueRef temp_ptr2
= lp_get_temp_ptr_soa(bld
,
1890 reg
->Register
.Index
,
1892 emit_store_64bit_chan(bld_base
, temp_ptr
, temp_ptr2
,
1896 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, temp_ptr
);
1900 case TGSI_FILE_ADDRESS
:
1901 assert(dtype
== TGSI_TYPE_SIGNED
);
1902 assert(LLVMTypeOf(value
) == int_bld
->vec_type
);
1903 value
= LLVMBuildBitCast(builder
, value
, int_bld
->vec_type
, "");
1904 lp_exec_mask_store(&bld
->exec_mask
, int_bld
, value
,
1905 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1916 * Called at the beginning of the translation of each TGSI instruction, to
1917 * emit some debug code.
1921 struct lp_build_tgsi_context
* bld_base
,
1922 const struct tgsi_full_instruction
* inst
,
1923 const struct tgsi_opcode_info
* info
)
1926 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1928 if (DEBUG_EXECUTION
) {
1930 * Dump the TGSI instruction.
1933 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1937 tgsi_dump_instruction_str(inst
, bld_base
->pc
, &buf
[2], sizeof buf
- 2);
1938 lp_build_printf(gallivm
, buf
);
1940 /* Dump the execution mask.
1942 if (bld
->exec_mask
.has_mask
) {
1943 lp_build_print_value(gallivm
, " mask = ", bld
->exec_mask
.exec_mask
);
1950 struct lp_build_tgsi_context
* bld_base
,
1951 const struct tgsi_full_instruction
* inst
,
1952 const struct tgsi_opcode_info
* info
,
1954 LLVMValueRef dst
[4])
1957 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
1959 unsigned writemask
= inst
->Dst
[index
].Register
.WriteMask
;
1961 unsigned chan_index
= u_bit_scan(&writemask
);
1962 if (tgsi_type_is_64bit(dtype
) && (chan_index
== 1 || chan_index
== 3))
1964 emit_store_chan(bld_base
, inst
, index
, chan_index
, dst
[chan_index
]);
1969 tgsi_to_pipe_tex_target(unsigned tgsi_target
)
1971 switch (tgsi_target
) {
1972 case TGSI_TEXTURE_BUFFER
:
1974 case TGSI_TEXTURE_1D
:
1975 case TGSI_TEXTURE_SHADOW1D
:
1976 return PIPE_TEXTURE_1D
;
1977 case TGSI_TEXTURE_2D
:
1978 case TGSI_TEXTURE_SHADOW2D
:
1979 case TGSI_TEXTURE_2D_MSAA
:
1980 return PIPE_TEXTURE_2D
;
1981 case TGSI_TEXTURE_3D
:
1982 return PIPE_TEXTURE_3D
;
1983 case TGSI_TEXTURE_CUBE
:
1984 case TGSI_TEXTURE_SHADOWCUBE
:
1985 return PIPE_TEXTURE_CUBE
;
1986 case TGSI_TEXTURE_RECT
:
1987 case TGSI_TEXTURE_SHADOWRECT
:
1988 return PIPE_TEXTURE_RECT
;
1989 case TGSI_TEXTURE_1D_ARRAY
:
1990 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
1991 return PIPE_TEXTURE_1D_ARRAY
;
1992 case TGSI_TEXTURE_2D_ARRAY
:
1993 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
1994 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
1995 return PIPE_TEXTURE_2D_ARRAY
;
1996 case TGSI_TEXTURE_CUBE_ARRAY
:
1997 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
1998 return PIPE_TEXTURE_CUBE_ARRAY
;
2006 static enum lp_sampler_lod_property
2007 lp_build_lod_property(
2008 struct lp_build_tgsi_context
*bld_base
,
2009 const struct tgsi_full_instruction
*inst
,
2012 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
2013 enum lp_sampler_lod_property lod_property
;
2016 * Not much we can do here. We could try catching inputs declared
2017 * with constant interpolation but not sure it's worth it - since for
2018 * TEX opcodes as well as FETCH/LD the lod comes from same reg as
2019 * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO), just
2020 * like the constant/immediate recognition below.
2021 * What seems to be of more value would be to recognize temps holding
2022 * broadcasted scalars but no way we can do it.
2023 * Tried asking llvm but without any success (using LLVMIsConstant
2024 * even though this isn't exactly what we'd need), even as simple as
2025 * IMM[0] UINT32 (0,-1,0,0)
2026 * MOV TEMP[0] IMM[0].yyyy
2027 * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
2029 * This means there's ZERO chance this will ever catch a scalar lod
2030 * with traditional tex opcodes as well as texel fetches, since the lod
2031 * comes from the same reg as coords (except some test shaders using
2032 * constant coords maybe).
2033 * There's at least hope for sample opcodes as well as size queries.
2035 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
||
2036 reg
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
2037 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2039 else if (bld_base
->info
->processor
== PIPE_SHADER_FRAGMENT
) {
2040 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2041 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2044 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2048 /* never use scalar (per-quad) lod the results are just too wrong. */
2049 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2051 return lod_property
;
2056 * High-level instruction translators.
2060 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
2061 const struct tgsi_full_instruction
*inst
,
2062 enum lp_build_tex_modifier modifier
,
2063 LLVMValueRef
*texel
,
2064 unsigned sampler_reg
,
2065 enum lp_sampler_op_type sampler_op
)
2067 unsigned unit
= inst
->Src
[sampler_reg
].Register
.Index
;
2068 LLVMValueRef oow
= NULL
;
2069 LLVMValueRef lod
= NULL
;
2070 LLVMValueRef coords
[5];
2071 LLVMValueRef offsets
[3] = { NULL
};
2072 struct lp_derivatives derivs
;
2073 struct lp_sampler_params params
;
2074 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2075 unsigned num_derivs
, num_offsets
, i
;
2076 unsigned shadow_coord
= 0;
2077 unsigned layer_coord
= 0;
2078 unsigned sample_key
= sampler_op
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2080 memset(¶ms
, 0, sizeof(params
));
2082 if (!bld
->sampler
) {
2083 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2084 for (i
= 0; i
< 4; i
++) {
2085 texel
[i
] = bld
->bld_base
.base
.undef
;
2090 switch (inst
->Texture
.Texture
) {
2091 case TGSI_TEXTURE_1D_ARRAY
:
2094 case TGSI_TEXTURE_1D
:
2098 case TGSI_TEXTURE_2D_ARRAY
:
2101 case TGSI_TEXTURE_2D
:
2102 case TGSI_TEXTURE_RECT
:
2106 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2109 case TGSI_TEXTURE_SHADOW1D
:
2114 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2120 case TGSI_TEXTURE_SHADOW2D
:
2121 case TGSI_TEXTURE_SHADOWRECT
:
2126 case TGSI_TEXTURE_CUBE
:
2130 case TGSI_TEXTURE_3D
:
2134 case TGSI_TEXTURE_SHADOWCUBE
:
2139 case TGSI_TEXTURE_CUBE_ARRAY
:
2144 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2148 shadow_coord
= 4; /* shadow coord special different reg */
2150 case TGSI_TEXTURE_2D_MSAA
:
2151 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2157 /* Note lod and especially projected are illegal in a LOT of cases */
2158 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2159 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2160 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
2161 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
) {
2162 /* note that shadow cube array with bias/explicit lod does not exist */
2163 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2166 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2168 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2169 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2171 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2172 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2174 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2177 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
2178 oow
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2179 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
2182 for (i
= 0; i
< num_derivs
; i
++) {
2183 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2184 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2185 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
2187 for (i
= num_derivs
; i
< 5; i
++) {
2188 coords
[i
] = bld
->bld_base
.base
.undef
;
2191 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2193 if (layer_coord
== 3) {
2194 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2197 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2199 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2200 coords
[2] = lp_build_mul(&bld
->bld_base
.base
, coords
[2], oow
);
2202 /* Shadow coord occupies always 5th slot. */
2204 sample_key
|= LP_SAMPLER_SHADOW
;
2205 if (shadow_coord
== 4) {
2206 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2209 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, shadow_coord
);
2211 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2212 coords
[4] = lp_build_mul(&bld
->bld_base
.base
, coords
[4], oow
);
2215 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2217 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2218 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2219 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, dim
);
2220 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 2, dim
);
2222 params
.derivs
= &derivs
;
2224 * could also check all src regs if constant but I doubt such
2225 * cases exist in practice.
2227 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2228 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2229 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2232 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2236 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2239 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2241 /* we don't handle the 4 offset version of tg4 */
2242 if (inst
->Texture
.NumOffsets
== 1) {
2244 sample_key
|= LP_SAMPLER_OFFSETS
;
2245 for (dim
= 0; dim
< num_offsets
; dim
++) {
2246 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2250 params
.type
= bld
->bld_base
.base
.type
;
2251 params
.sample_key
= sample_key
;
2252 params
.texture_index
= unit
;
2253 params
.sampler_index
= unit
;
2254 params
.context_ptr
= bld
->context_ptr
;
2255 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2256 params
.coords
= coords
;
2257 params
.offsets
= offsets
;
2259 params
.texel
= texel
;
2261 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2262 bld
->bld_base
.base
.gallivm
,
/**
 * Emit code for the new-style (SM4/"sample" family) texture sampling opcodes.
 *
 * Unlike legacy TEX-style opcodes, the texture and sampler indices come from
 * src1/src2, and the texture target comes from the declared sampler view
 * rather than from the instruction itself.
 *
 * NOTE(review): extraction of this chunk dropped the "static void" return
 * type, the switch-case bodies, else-branches and closing braces; they were
 * restored from the surrounding fragments — confirm against the full file.
 *
 * \param bld          the SoA translation context
 * \param inst         the TGSI sample instruction
 * \param modifier     lod bias / explicit lod / lod zero / explicit deriv
 * \param compare      true for shadow-compare variants (SAMPLE_C / SAMPLE_C_LZ)
 * \param sample_type  texture / gather / lodq op type
 * \param texel        output: 4 result channels
 */
static void
emit_sample(struct lp_build_tgsi_soa_context *bld,
            const struct tgsi_full_instruction *inst,
            enum lp_build_tex_modifier modifier,
            boolean compare,
            enum lp_sampler_op_type sample_type,
            LLVMValueRef *texel)
{
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   unsigned texture_unit, sampler_unit;
   LLVMValueRef lod = NULL;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   struct lp_derivatives derivs;
   struct lp_sampler_params params;
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
   unsigned num_offsets, num_derivs, i;
   unsigned layer_coord = 0;
   unsigned sample_key = sample_type << LP_SAMPLER_OP_TYPE_SHIFT;

   memset(&params, 0, sizeof(params));

   /* Without a sampler generator we cannot emit anything meaningful;
    * return undef channels instead of crashing. */
   if (!bld->sampler) {
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = bld->bld_base.base.undef;
      }
      return;
   }

   /*
    * unlike old-style tex opcodes the texture/sampler indices
    * always come from src1 and src2 respectively.
    */
   texture_unit = inst->Src[1].Register.Index;
   sampler_unit = inst->Src[2].Register.Index;

   /*
    * Note inst->Texture.Texture will contain the number of offsets,
    * however the target information is NOT there and comes from the
    * declared sampler views instead.
    */
   switch (bld->sv[texture_unit].Resource) {
   case TGSI_TEXTURE_1D:
      num_offsets = 1;
      num_derivs = 1;
      break;
   case TGSI_TEXTURE_1D_ARRAY:
      layer_coord = 1;
      num_offsets = 1;
      num_derivs = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      num_offsets = 2;
      num_derivs = 2;
      break;
   case TGSI_TEXTURE_2D_ARRAY:
      layer_coord = 2;
      num_offsets = 2;
      num_derivs = 2;
      break;
   case TGSI_TEXTURE_CUBE:
      num_offsets = 2;
      num_derivs = 3;
      break;
   case TGSI_TEXTURE_3D:
      num_offsets = 3;
      num_derivs = 3;
      break;
   case TGSI_TEXTURE_CUBE_ARRAY:
      layer_coord = 3;
      num_offsets = 2;
      num_derivs = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* lod bias / explicit lod always live in src3.x for this opcode family */
   if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS ||
       modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
      lod = lp_build_emit_fetch(&bld->bld_base, inst, 3, 0);
      if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
      }
      else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
      }
      lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
   }
   else if (modifier == LP_BLD_TEX_MODIFIER_LOD_ZERO) {
      /* XXX might be better to explicitly pass the level zero information */
      sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
      lod = lp_build_const_vec(gallivm, bld->bld_base.base.type, 0.0F);
   }

   /* Fetch the coordinates (one per derivative dimension); pad with undef. */
   for (i = 0; i < num_derivs; i++) {
      coords[i] = lp_build_emit_fetch(&bld->bld_base, inst, 0, i);
   }
   for (i = num_derivs; i < 5; i++) {
      coords[i] = bld->bld_base.base.undef;
   }

   /* Layer coord always goes into 3rd slot, except for cube map arrays */
   if (layer_coord) {
      if (layer_coord == 3)
         coords[3] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
      else
         coords[2] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
   }
   /* Shadow coord occupies always 5th slot. */
   if (compare) {
      sample_key |= LP_SAMPLER_SHADOW;
      coords[4] = lp_build_emit_fetch(&bld->bld_base, inst, 3, 0);
   }

   if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
      unsigned dim;
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
      /* SAMPLE_D: ddx in src3, ddy in src4. */
      for (dim = 0; dim < num_derivs; ++dim) {
         derivs.ddx[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 3, dim);
         derivs.ddy[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 4, dim);
      }
      params.derivs = &derivs;
      /*
       * could also check all src regs if constant but I doubt such
       * cases exist in practice.
       */
      if (bld->bld_base.info->processor == PIPE_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD) {
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         }
         else {
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
         }
      }
      else {
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      }
   }

   /* some advanced gather instructions (txgo) would require 4 offsets */
   if (inst->Texture.NumOffsets == 1) {
      unsigned dim;
      sample_key |= LP_SAMPLER_OFFSETS;
      for (dim = 0; dim < num_offsets; dim++) {
         offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim);
      }
   }
   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;

   params.type = bld->bld_base.base.type;
   params.sample_key = sample_key;
   params.texture_index = texture_unit;
   params.sampler_index = sampler_unit;
   params.context_ptr = bld->context_ptr;
   params.thread_data_ptr = bld->thread_data_ptr;
   params.coords = coords;
   params.offsets = offsets;
   params.lod = lod;
   params.texel = texel;

   bld->sampler->emit_tex_sample(bld->sampler,
                                 bld->bld_base.base.gallivm,
                                 &params);

   /* Apply the sampler-view swizzle from src1, if it is not identity. */
   if (inst->Src[1].Register.SwizzleX != PIPE_SWIZZLE_X ||
       inst->Src[1].Register.SwizzleY != PIPE_SWIZZLE_Y ||
       inst->Src[1].Register.SwizzleZ != PIPE_SWIZZLE_Z ||
       inst->Src[1].Register.SwizzleW != PIPE_SWIZZLE_W) {
      unsigned char swizzles[4];
      swizzles[0] = inst->Src[1].Register.SwizzleX;
      swizzles[1] = inst->Src[1].Register.SwizzleY;
      swizzles[2] = inst->Src[1].Register.SwizzleZ;
      swizzles[3] = inst->Src[1].Register.SwizzleW;

      lp_build_swizzle_soa_inplace(&bld->bld_base.base, texel, swizzles);
   }
}
/**
 * Emit code for TXF / SAMPLE_I: unfiltered texel fetch with integer coords.
 *
 * NOTE(review): the return type, "is_samplei" parameter line, switch-case
 * bodies and closing braces were lost in extraction and restored from the
 * visible fragments — confirm against the full file.
 *
 * \param bld         the SoA translation context
 * \param inst        the TGSI instruction
 * \param texel       output: 4 result channels
 * \param is_samplei  true for SAMPLE_I (target from declared sampler view,
 *                    swizzle applied); false for TXF (target from instruction)
 */
static void
emit_fetch_texels( struct lp_build_tgsi_soa_context *bld,
                   const struct tgsi_full_instruction *inst,
                   LLVMValueRef *texel,
                   boolean is_samplei)
{
   unsigned unit, target;
   LLVMValueRef coord_undef = LLVMGetUndef(bld->bld_base.base.int_vec_type);
   LLVMValueRef explicit_lod = NULL;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   struct lp_sampler_params params;
   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
   unsigned dims, i;
   unsigned layer_coord = 0;
   unsigned sample_key = LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;

   memset(&params, 0, sizeof(params));

   /* No sampler generator: fill with undef rather than crash. */
   if (!bld->sampler) {
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = coord_undef;
      }
      return;
   }

   unit = inst->Src[1].Register.Index;

   if (is_samplei) {
      target = bld->sv[unit].Resource;
   }
   else {
      target = inst->Texture.Texture;
   }

   switch (target) {
   case TGSI_TEXTURE_1D:
   case TGSI_TEXTURE_BUFFER:
      dims = 1;
      break;
   case TGSI_TEXTURE_1D_ARRAY:
      layer_coord = 1;
      dims = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
   case TGSI_TEXTURE_2D_MSAA:
      dims = 2;
      break;
   case TGSI_TEXTURE_2D_ARRAY:
   case TGSI_TEXTURE_2D_ARRAY_MSAA:
      layer_coord = 2;
      dims = 2;
      break;
   case TGSI_TEXTURE_3D:
      dims = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* always have lod except for buffers and msaa targets ? */
   if (target != TGSI_TEXTURE_BUFFER &&
       target != TGSI_TEXTURE_2D_MSAA &&
       target != TGSI_TEXTURE_2D_ARRAY_MSAA) {
      sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
      explicit_lod = lp_build_emit_fetch(&bld->bld_base, inst, 0, 3);
      lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
   }
   /*
    * XXX: for real msaa support, the w component (or src2.x for sample_i_ms)
    * would be the sample index.
    */

   for (i = 0; i < dims; i++) {
      coords[i] = lp_build_emit_fetch(&bld->bld_base, inst, 0, i);
   }
   /* never use more than 3 coords here but emit_fetch_texel copies all 5 anyway */
   for (i = dims; i < 5; i++) {
      coords[i] = coord_undef;
   }
   if (layer_coord)
      coords[2] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);

   if (inst->Texture.NumOffsets == 1) {
      unsigned dim;
      sample_key |= LP_SAMPLER_OFFSETS;
      for (dim = 0; dim < dims; dim++) {
         offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim);
      }
   }
   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;

   params.type = bld->bld_base.base.type;
   params.sample_key = sample_key;
   params.texture_index = unit;
   /*
    * sampler not actually used, set to 0 so it won't exceed PIPE_MAX_SAMPLERS
    * and trigger some assertions with d3d10 where the sampler view number
    * can exceed this.
    */
   params.sampler_index = 0;
   params.context_ptr = bld->context_ptr;
   params.thread_data_ptr = bld->thread_data_ptr;
   params.coords = coords;
   params.offsets = offsets;
   params.derivs = NULL;
   params.lod = explicit_lod;
   params.texel = texel;

   bld->sampler->emit_tex_sample(bld->sampler,
                                 bld->bld_base.base.gallivm,
                                 &params);

   /* SAMPLE_I honors the sampler-view swizzle (TXF does not). */
   if (is_samplei &&
       (inst->Src[1].Register.SwizzleX != PIPE_SWIZZLE_X ||
        inst->Src[1].Register.SwizzleY != PIPE_SWIZZLE_Y ||
        inst->Src[1].Register.SwizzleZ != PIPE_SWIZZLE_Z ||
        inst->Src[1].Register.SwizzleW != PIPE_SWIZZLE_W)) {
      unsigned char swizzles[4];
      swizzles[0] = inst->Src[1].Register.SwizzleX;
      swizzles[1] = inst->Src[1].Register.SwizzleY;
      swizzles[2] = inst->Src[1].Register.SwizzleZ;
      swizzles[3] = inst->Src[1].Register.SwizzleW;

      lp_build_swizzle_soa_inplace(&bld->bld_base.base, texel, swizzles);
   }
}
/**
 * Emit code for TXQ / SVIEWINFO: query texture size (per mip level).
 *
 * NOTE(review): return type, the is_sviewinfo branch, switch-case bodies
 * and the trailing call arguments/braces were lost in extraction and
 * restored from the visible fragments — confirm against the full file.
 *
 * \param bld           the SoA translation context
 * \param inst          the TGSI query instruction
 * \param sizes_out     output: up to 4 integer size channels
 * \param is_sviewinfo  true for SVIEWINFO (target from declared sampler view)
 */
static void
emit_size_query( struct lp_build_tgsi_soa_context *bld,
                 const struct tgsi_full_instruction *inst,
                 LLVMValueRef *sizes_out,
                 boolean is_sviewinfo)
{
   LLVMValueRef explicit_lod;
   enum lp_sampler_lod_property lod_property;
   unsigned has_lod;
   unsigned i;
   unsigned unit = inst->Src[1].Register.Index;
   unsigned target, pipe_target;
   struct lp_sampler_size_query_params params;

   if (is_sviewinfo) {
      target = bld->sv[unit].Resource;
   }
   else {
      target = inst->Texture.Texture;
   }
   /* Buffers and rect targets have no mip levels, hence no lod argument. */
   switch (target) {
   case TGSI_TEXTURE_BUFFER:
   case TGSI_TEXTURE_RECT:
   case TGSI_TEXTURE_SHADOWRECT:
      has_lod = 0;
      break;
   default:
      has_lod = 1;
      break;
   }

   if (!bld->sampler) {
      _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++)
         sizes_out[i] = bld->bld_base.int_bld.undef;
      return;
   }

   if (has_lod) {
      explicit_lod = lp_build_emit_fetch(&bld->bld_base, inst, 0, 0);
      lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
   }
   else {
      explicit_lod = NULL;
      lod_property = LP_SAMPLER_LOD_SCALAR;
   }

   pipe_target = tgsi_to_pipe_tex_target(target);

   params.int_type = bld->bld_base.int_bld.type;
   params.texture_unit = unit;
   params.target = pipe_target;
   params.context_ptr = bld->context_ptr;
   params.is_sviewinfo = TRUE;
   params.lod_property = lod_property;
   params.explicit_lod = explicit_lod;
   params.sizes_out = sizes_out;

   bld->sampler->emit_size_query(bld->sampler,
                                 bld->bld_base.base.gallivm,
                                 &params);
}
/**
 * Heuristic: return TRUE if we are within ~5 instructions of the shader end
 * and none of those instructions needs a live execution mask (texturing,
 * calls, control flow).  Used to skip redundant mask checks after KILL.
 *
 * NOTE(review): the return type, "int pc" parameter and the final
 * return statements were lost in extraction and restored — confirm
 * against the full file.
 */
static boolean
near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
                   int pc)
{
   unsigned i;

   /* Look ahead at most 5 instructions. */
   for (i = 0; i < 5; i++) {
      enum tgsi_opcode opcode;

      if (pc + i >= bld->bld_base.info->num_instructions)
         return TRUE;

      opcode = bld->bld_base.instructions[pc + i].Instruction.Opcode;

      if (opcode == TGSI_OPCODE_END)
         return TRUE;

      /* Any of these still needs a correct execution mask. */
      if (opcode == TGSI_OPCODE_TEX ||
          opcode == TGSI_OPCODE_TXP ||
          opcode == TGSI_OPCODE_TXD ||
          opcode == TGSI_OPCODE_TXB ||
          opcode == TGSI_OPCODE_TXL ||
          opcode == TGSI_OPCODE_TXF ||
          opcode == TGSI_OPCODE_TXQ ||
          opcode == TGSI_OPCODE_TEX2 ||
          opcode == TGSI_OPCODE_TXB2 ||
          opcode == TGSI_OPCODE_TXL2 ||
          opcode == TGSI_OPCODE_SAMPLE ||
          opcode == TGSI_OPCODE_SAMPLE_B ||
          opcode == TGSI_OPCODE_SAMPLE_C ||
          opcode == TGSI_OPCODE_SAMPLE_C_LZ ||
          opcode == TGSI_OPCODE_SAMPLE_D ||
          opcode == TGSI_OPCODE_SAMPLE_I ||
          opcode == TGSI_OPCODE_SAMPLE_I_MS ||
          opcode == TGSI_OPCODE_SAMPLE_L ||
          opcode == TGSI_OPCODE_SVIEWINFO ||
          opcode == TGSI_OPCODE_CAL ||
          opcode == TGSI_OPCODE_IF ||
          opcode == TGSI_OPCODE_UIF ||
          opcode == TGSI_OPCODE_BGNLOOP ||
          opcode == TGSI_OPCODE_SWITCH)
         return FALSE;
   }

   return TRUE;
}
/**
 * Kill fragment if any of the src register values are negative.
 *
 * NOTE(review): the "static void emit_kill_if(" header, "int pc" parameter
 * and several brace/else lines were lost in extraction and restored from
 * the visible fragments — confirm against the full file.
 */
static void
emit_kill_if(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   int pc)
{
   LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
   const struct tgsi_full_src_register *reg = &inst->Src[0];
   LLVMValueRef terms[TGSI_NUM_CHANNELS];
   LLVMValueRef mask;
   unsigned chan_index;

   memset(&terms, 0, sizeof terms);

   TGSI_FOR_EACH_CHANNEL( chan_index ) {
      unsigned swizzle;

      /* Unswizzle channel */
      swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );

      /* Check if the component has not been already tested. */
      assert(swizzle < TGSI_NUM_CHANNELS);
      if( !terms[swizzle] )
         /* TODO: change the comparison operator instead of setting the sign */
         terms[swizzle] = lp_build_emit_fetch(&bld->bld_base, inst, 0, chan_index );
   }

   mask = NULL;
   TGSI_FOR_EACH_CHANNEL( chan_index ) {
      if(terms[chan_index]) {
         LLVMValueRef chan_mask;

         /*
          * If term < 0 then mask = 0 else mask = ~0.
          */
         chan_mask = lp_build_cmp(&bld->bld_base.base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->bld_base.base.zero);

         /* AND all tested channels together. */
         if (mask)
            mask = LLVMBuildAnd(builder, mask, chan_mask, "");
         else
            mask = chan_mask;
      }
   }

   /* Pixels already disabled by control flow must not be re-killed. */
   if (bld->exec_mask.has_mask) {
      LLVMValueRef invmask;
      invmask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
      mask = LLVMBuildOr(builder, mask, invmask, "");
   }

   lp_build_mask_update(bld->mask, mask);
   if (!near_end_of_shader(bld, pc))
      lp_build_mask_check(bld->mask);
}
/**
 * Unconditional fragment kill.
 * The only predication is the execution mask which will apply if
 * we're inside a loop or conditional.
 *
 * NOTE(review): the "static void emit_kill(" header, "int pc" parameter
 * and the else-branch structure were lost in extraction and restored —
 * confirm against the full file.
 */
static void
emit_kill(struct lp_build_tgsi_soa_context *bld,
          int pc)
{
   LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
   LLVMValueRef mask;

   /* For those channels which are "alive", disable fragment shader
    * execution.
    */
   if (bld->exec_mask.has_mask) {
      /* Keep only the pixels the current control flow had disabled. */
      mask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
   }
   else {
      /* No exec mask: kill everything. */
      LLVMValueRef zero = LLVMConstNull(bld->bld_base.base.int_vec_type);
      mask = zero;
   }

   lp_build_mask_update(bld->mask, mask);

   if (!near_end_of_shader(bld, pc))
      lp_build_mask_check(bld->mask);
}
/**
 * Emit code which will dump the value of all the registers of the given
 * file (debug aid).
 *
 * NOTE(review): the "static void emit_dump_file(" header, the "uint file"
 * parameter, local declarations, continue statements and asserts were lost
 * in extraction and restored from the visible fragments — confirm against
 * the full file.
 */
static void
emit_dump_file(struct lp_build_tgsi_soa_context *bld,
               uint file)
{
   const struct tgsi_shader_info *info = bld->bld_base.info;
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef reg_ptr;
   int index;
   int max_index = info->file_max[file];

   /*
    * Some register files, particularly constants, can be very large,
    * and dumping everything could make this unusably slow.
    */
   max_index = MIN2(max_index, 32);

   for (index = 0; index <= max_index; index++) {
      LLVMValueRef res;
      unsigned mask;
      int chan;

      if (index < 8 * sizeof(unsigned) &&
          (info->file_mask[file] & (1u << index)) == 0) {
         /* This was not declared.*/
         continue;
      }

      if (file == TGSI_FILE_INPUT) {
         mask = info->input_usage_mask[index];
      } else {
         mask = TGSI_WRITEMASK_XYZW;
      }

      for (chan = 0; chan < 4; chan++) {
         if ((mask & (1 << chan)) == 0) {
            /* This channel is not used.*/
            continue;
         }

         if (file == TGSI_FILE_CONSTANT) {
            /* Constants go through the file's fetch callback; build a
             * plain identity-swizzled src register for it. */
            struct tgsi_full_src_register reg;
            memset(&reg, 0, sizeof reg);
            reg.Register.File = file;
            reg.Register.Index = index;
            reg.Register.SwizzleX = 0;
            reg.Register.SwizzleY = 1;
            reg.Register.SwizzleZ = 2;
            reg.Register.SwizzleW = 3;

            res = bld->bld_base.emit_fetch_funcs[file](&bld->bld_base, &reg, TGSI_TYPE_FLOAT, chan);
            if (!res) {
               continue;
            }
         } else if (file == TGSI_FILE_INPUT) {
            res = bld->inputs[index][chan];
            if (!res) {
               continue;
            }
         } else if (file == TGSI_FILE_TEMPORARY) {
            reg_ptr = lp_get_temp_ptr_soa(bld, index, chan);
            assert(reg_ptr);
            res = LLVMBuildLoad(builder, reg_ptr, "");
         } else if (file == TGSI_FILE_OUTPUT) {
            reg_ptr = lp_get_output_ptr(bld, index, chan);
            assert(reg_ptr);
            res = LLVMBuildLoad(builder, reg_ptr, "");
         } else {
            assert(0);
            continue;
         }

         emit_dump_reg(gallivm, file, index, chan, res);
      }
   }
}
/**
 * Handle a TGSI declaration: allocate storage (allocas) for temporaries,
 * outputs and address registers, record sampler-view targets, and cache
 * per-buffer constant pointers.
 *
 * NOTE(review): the "void" return type line, loop-variable declarations,
 * break statements and closing braces were lost in extraction and restored
 * from the visible fragments — confirm against the full file.
 */
void lp_emit_declaration_soa(
   struct lp_build_tgsi_context *bld_base,
   const struct tgsi_full_declaration *decl)
{
   struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMTypeRef vec_type = bld->bld_base.base.vec_type;
   const unsigned first = decl->Range.First;
   const unsigned last = decl->Range.Last;
   unsigned idx, i;

   assert(last <= bld->bld_base.info->file_max[decl->Declaration.File]);

   switch (decl->Declaration.File) {
   case TGSI_FILE_TEMPORARY:
      /* Indirectly-addressed temps live in an array instead (handled
       * elsewhere); only allocate per-register allocas otherwise. */
      if (!(bld->indirect_files & (1 << TGSI_FILE_TEMPORARY))) {
         assert(last < LP_MAX_INLINED_TEMPS);
         for (idx = first; idx <= last; ++idx) {
            for (i = 0; i < TGSI_NUM_CHANNELS; i++)
               bld->temps[idx][i] = lp_build_alloca(gallivm, vec_type, "temp");
         }
      }
      break;

   case TGSI_FILE_OUTPUT:
      if (!(bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
         for (idx = first; idx <= last; ++idx) {
            for (i = 0; i < TGSI_NUM_CHANNELS; i++)
               bld->outputs[idx][i] = lp_build_alloca(gallivm,
                                                      vec_type, "output");
         }
      }
      break;

   case TGSI_FILE_ADDRESS:
      /* ADDR registers are only allocated with an integer LLVM IR type,
       * as they are guaranteed to always have integers.
       * XXX: Not sure if this exception is worthwhile (or the whole idea of
       * an ADDR register for that matter).
       */
      assert(last < LP_MAX_TGSI_ADDRS);
      for (idx = first; idx <= last; ++idx) {
         assert(idx < LP_MAX_TGSI_ADDRS);
         for (i = 0; i < TGSI_NUM_CHANNELS; i++)
            bld->addr[idx][i] = lp_build_alloca(gallivm, bld_base->base.int_vec_type, "addr");
      }
      break;

   case TGSI_FILE_SAMPLER_VIEW:
      /*
       * The target stored here MUST match whatever there actually
       * is in the set sampler views (what about return type?).
       */
      assert(last < PIPE_MAX_SHADER_SAMPLER_VIEWS);
      for (idx = first; idx <= last; ++idx) {
         bld->sv[idx] = decl->SamplerView;
      }
      break;

   case TGSI_FILE_CONSTANT:
   {
      /*
       * We could trivially fetch the per-buffer pointer when fetching the
       * constant, relying on llvm to figure out it's always the same pointer
       * anyway. However, doing so results in a huge (more than factor of 10)
       * slowdown in llvm compilation times for some (but not all) shaders
       * (more specifically, the IR optimization spends way more time in
       * DominatorTree::dominates). At least with llvm versions 3.1, 3.3.
       */
      unsigned idx2D = decl->Dim.Index2D;
      LLVMValueRef index2D = lp_build_const_int32(gallivm, idx2D);
      assert(idx2D < LP_MAX_TGSI_CONST_BUFFERS);
      bld->consts[idx2D] =
         lp_build_array_get(gallivm, bld->consts_ptr, index2D);
      bld->consts_sizes[idx2D] =
         lp_build_array_get(gallivm, bld->const_sizes_ptr, index2D);
   }
   break;

   default:
      /* don't need to declare other vars */
      break;
   }
}
/**
 * Register an immediate: build per-channel constant vectors (bitcast to the
 * float vector type for integer immediates) and either store them into the
 * indirect immediates array or record them in the inline immediates[] table.
 *
 * NOTE(review): loop-variable declarations, break statements, the else
 * keyword and closing braces were lost in extraction and restored from the
 * visible fragments — confirm against the full file.
 */
void lp_emit_immediate_soa(
   struct lp_build_tgsi_context *bld_base,
   const struct tgsi_full_immediate *imm)
{
   struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef imms[4];
   unsigned i;
   /* NrTokens includes the header token itself. */
   const uint size = imm->Immediate.NrTokens - 1;

   switch (imm->Immediate.DataType) {
   case TGSI_IMM_FLOAT32:
      for( i = 0; i < size; ++i )
         imms[i] =
            lp_build_const_vec(gallivm, bld_base->base.type, imm->u[i].Float);
      break;
   case TGSI_IMM_FLOAT64:
   case TGSI_IMM_UINT64:
   case TGSI_IMM_INT64:
   case TGSI_IMM_UINT32:
      /* Stored as raw bits, bitcast to the float vec type. */
      for( i = 0; i < size; ++i ) {
         LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->uint_bld.type, imm->u[i].Uint);
         imms[i] = LLVMConstBitCast(tmp, bld_base->base.vec_type);
      }
      break;
   case TGSI_IMM_INT32:
      for( i = 0; i < size; ++i ) {
         LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->int_bld.type, imm->u[i].Int);
         imms[i] = LLVMConstBitCast(tmp, bld_base->base.vec_type);
      }
      break;
   }
   /* Pad unused channels with undef. */
   for( i = size; i < 4; ++i )
      imms[i] = bld_base->base.undef;

   if (bld->use_immediates_array) {
      /* Immediates live in a memory array (indirect addressing). */
      unsigned index = bld->num_immediates;
      struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
      LLVMBuilderRef builder = gallivm->builder;
      LLVMValueRef gep[2];
      gep[0] = lp_build_const_int32(gallivm, 0);

      assert(bld->indirect_files & (1 << TGSI_FILE_IMMEDIATE));
      for (i = 0; i < 4; ++i) {
         gep[1] = lp_build_const_int32(gallivm, index * 4 + i);
         LLVMValueRef imm_ptr = LLVMBuildGEP(builder,
                                             bld->imms_array, gep, 2, "");
         LLVMBuildStore(builder, imms[i], imm_ptr);
      }
   } else {
      /* simply copy the immediate values into the next immediates[] slot */
      assert(imm->Immediate.NrTokens - 1 <= 4);
      assert(bld->num_immediates < LP_MAX_INLINED_IMMEDIATES);

      for(i = 0; i < 4; ++i )
         bld->immediates[bld->num_immediates][i] = imms[i];

      /* Mirror into the array as well if immediates are also accessed
       * indirectly. */
      if (bld->indirect_files & (1 << TGSI_FILE_IMMEDIATE)) {
         unsigned index = bld->num_immediates;
         struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
         LLVMBuilderRef builder = gallivm->builder;
         LLVMValueRef gep[2];
         gep[0] = lp_build_const_int32(gallivm, 0);
         for (i = 0; i < 4; ++i) {
            gep[1] = lp_build_const_int32(gallivm, index * 4 + i);
            LLVMValueRef imm_ptr = LLVMBuildGEP(builder,
                                                bld->imms_array, gep, 2, "");
            LLVMBuildStore(builder,
                           bld->immediates[index][i],
                           imm_ptr);
         }
      }
   }

   bld->num_immediates++;
}
/* DDX opcode action: screen-space x-derivative of the operand.
 * NOTE(review): the "static void ddx_emit(" header line was lost in
 * extraction and reconstructed from the call payload — confirm. */
static void
ddx_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   /* ddx goes into the 4th argument; ddy slot left NULL. */
   emit_fetch_deriv(bld, emit_data->args[0], NULL,
                    &emit_data->output[emit_data->chan], NULL);
}
/* DDY opcode action: screen-space y-derivative of the operand.
 * NOTE(review): the "static void ddy_emit(" header line was lost in
 * extraction and reconstructed from the call payload — confirm. */
static void
ddy_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   /* ddy goes into the 5th argument; ddx slot left NULL. */
   emit_fetch_deriv(bld, emit_data->args[0], NULL, NULL,
                    &emit_data->output[emit_data->chan]);
}
/* KILL opcode action: unconditional fragment kill.
 * NOTE(review): header line reconstructed — confirm name. */
static void
kill_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   /* pc - 1 = index of this instruction (pc already advanced). */
   emit_kill(bld, bld_base->pc - 1);
}
/* KILL_IF opcode action: kill fragments where any src channel < 0.
 * NOTE(review): header line reconstructed — confirm name. */
static void
kill_if_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_kill_if(bld, emit_data->inst, bld_base->pc - 1);
}
/* TEX opcode action: plain sample, implicit lod, 1 sampler-unit src reg.
 * NOTE(review): header line reconstructed — confirm name. */
static void
tex_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
            emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
}
/* TEX2 opcode action: two-src-register variant of TEX.
 * NOTE(review): header line reconstructed — confirm name. */
static void
tex2_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
            emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
}
/* TXB opcode action: sample with lod bias.
 * NOTE(review): header line reconstructed — confirm name. */
static void
txb_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
            emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
}
/* TXB2 opcode action: two-src-register variant of TXB.
 * NOTE(review): header line reconstructed — confirm name. */
static void
txb2_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
            emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
}
/* TXD opcode action: sample with explicit derivatives (3 src regs).
 * NOTE(review): header line reconstructed — confirm name. */
static void
txd_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
            emit_data->output, 3, LP_SAMPLER_OP_TEXTURE);
}
/* TXL opcode action: sample with explicit lod.
 * NOTE(review): header line reconstructed — confirm name. */
static void
txl_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
            emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
}
/* TXL2 opcode action: two-src-register variant of TXL.
 * NOTE(review): header line reconstructed — confirm name. */
static void
txl2_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
            emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
}
/* TXP opcode action: projected sample (coords divided by w).
 * NOTE(review): header line reconstructed — confirm name. */
static void
txp_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_PROJECTED,
            emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
}
/* TG4 opcode action: texture gather (4 texels of one channel).
 * NOTE(review): header line reconstructed — confirm name. */
static void
tg4_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
            emit_data->output, 2, LP_SAMPLER_OP_GATHER);
}
/* LODQ opcode action: query the lod that would be used for a sample.
 * NOTE(review): header line reconstructed — confirm name. */
static void
lodq_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
            emit_data->output, 1, LP_SAMPLER_OP_LODQ);
}
/* TXQ opcode action: texture size query (target from instruction).
 * NOTE(review): header line reconstructed — confirm name. */
static void
txq_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_size_query(bld, emit_data->inst, emit_data->output, FALSE);
}
/* TXF opcode action: unfiltered texel fetch (target from instruction).
 * NOTE(review): header line reconstructed — confirm name. */
static void
txf_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_fetch_texels(bld, emit_data->inst, emit_data->output, FALSE);
}
/* SAMPLE_I opcode action: texel fetch, target from declared sampler view.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_i_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_fetch_texels(bld, emit_data->inst, emit_data->output, TRUE);
}
/* SAMPLE opcode action: plain sample, no shadow compare.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
               FALSE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* SAMPLE_B opcode action: sample with lod bias.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_b_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
               FALSE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* SAMPLE_C opcode action: shadow-compare sample.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_c_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
               TRUE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* SAMPLE_C_LZ opcode action: shadow-compare sample, lod forced to zero.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_c_lz_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_ZERO,
               TRUE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* SAMPLE_D opcode action: sample with explicit derivatives.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_d_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
               FALSE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* SAMPLE_L opcode action: sample with explicit lod.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sample_l_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
               FALSE, LP_SAMPLER_OP_TEXTURE, emit_data->output);
}
/* GATHER4 opcode action: gather via the SAMPLE path.
 * NOTE(review): header line reconstructed — confirm name. */
static void
gather4_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
               FALSE, LP_SAMPLER_OP_GATHER, emit_data->output);
}
/* SVIEWINFO opcode action: size query, target from declared sampler view.
 * NOTE(review): header line reconstructed — confirm name. */
static void
sviewinfo_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_size_query(bld, emit_data->inst, emit_data->output, TRUE);
}
/* LOD opcode action: lod query via the SAMPLE path.
 * NOTE(review): header line reconstructed — confirm name. */
static void
lod_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
               FALSE, LP_SAMPLER_OP_LODQ, emit_data->output);
}
/**
 * Return the combined live-pixel mask: the shader-level mask ANDed with the
 * current control-flow execution mask (if any).
 *
 * NOTE(review): the "static LLVMValueRef" return-type line and braces were
 * lost in extraction and restored — confirm against the full file.
 */
static LLVMValueRef
mask_vec(struct lp_build_tgsi_context *bld_base)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
   struct lp_exec_mask *exec_mask = &bld->exec_mask;

   if (!exec_mask->has_mask) {
      return lp_build_mask_value(bld->mask);
   }
   return LLVMBuildAnd(builder, lp_build_mask_value(bld->mask),
                       exec_mask->exec_mask, "");
}
3353 increment_vec_ptr_by_mask(struct lp_build_tgsi_context
* bld_base
,
3357 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3358 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3360 current_vec
= LLVMBuildSub(builder
, current_vec
, mask
, "");
3362 LLVMBuildStore(builder
, current_vec
, ptr
);
3366 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context
* bld_base
,
3370 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3371 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3373 current_vec
= lp_build_select(&bld_base
->uint_bld
,
3375 bld_base
->uint_bld
.zero
,
3378 LLVMBuildStore(builder
, current_vec
, ptr
);
3382 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context
* bld
,
3383 LLVMValueRef current_mask_vec
,
3384 LLVMValueRef total_emitted_vertices_vec
)
3386 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3387 struct lp_build_context
*int_bld
= &bld
->bld_base
.int_bld
;
3388 LLVMValueRef max_mask
= lp_build_cmp(int_bld
, PIPE_FUNC_LESS
,
3389 total_emitted_vertices_vec
,
3390 bld
->max_output_vertices_vec
);
3392 return LLVMBuildAnd(builder
, current_mask_vec
, max_mask
, "");
3397 const struct lp_build_tgsi_action
* action
,
3398 struct lp_build_tgsi_context
* bld_base
,
3399 struct lp_build_emit_data
* emit_data
)
3401 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3402 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3404 if (bld
->gs_iface
->emit_vertex
) {
3405 LLVMValueRef mask
= mask_vec(bld_base
);
3406 LLVMValueRef total_emitted_vertices_vec
=
3407 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3408 mask
= clamp_mask_to_max_output_vertices(bld
, mask
,
3409 total_emitted_vertices_vec
);
3410 gather_outputs(bld
);
3411 bld
->gs_iface
->emit_vertex(bld
->gs_iface
, &bld
->bld_base
,
3413 total_emitted_vertices_vec
);
3414 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3416 increment_vec_ptr_by_mask(bld_base
, bld
->total_emitted_vertices_vec_ptr
,
3419 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3420 " +++ emit vertex masked ones = ",
3422 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3423 " +++ emit vertex emitted = ",
3424 total_emitted_vertices_vec
);
3431 end_primitive_masked(struct lp_build_tgsi_context
* bld_base
,
3434 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3435 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3437 if (bld
->gs_iface
->end_primitive
) {
3438 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3439 LLVMValueRef emitted_vertices_vec
=
3440 LLVMBuildLoad(builder
, bld
->emitted_vertices_vec_ptr
, "");
3441 LLVMValueRef emitted_prims_vec
=
3442 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3444 LLVMValueRef emitted_mask
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3445 emitted_vertices_vec
,
3447 /* We need to combine the current execution mask with the mask
3448 telling us which, if any, execution slots actually have
3449 unemitted primitives, this way we make sure that end_primitives
3450 executes only on the paths that have unflushed vertices */
3451 mask
= LLVMBuildAnd(builder
, mask
, emitted_mask
, "");
3453 bld
->gs_iface
->end_primitive(bld
->gs_iface
, &bld
->bld_base
,
3454 emitted_vertices_vec
,
3458 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3459 " +++ end prim masked ones = ",
3461 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3462 " +++ end prim emitted verts1 = ",
3463 emitted_vertices_vec
);
3464 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3465 " +++ end prim emitted prims1 = ",
3466 LLVMBuildLoad(builder
,
3467 bld
->emitted_prims_vec_ptr
, ""));
3469 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_prims_vec_ptr
,
3471 clear_uint_vec_ptr_from_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3474 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3475 " +++ end prim emitted verts2 = ",
3476 LLVMBuildLoad(builder
,
3477 bld
->emitted_vertices_vec_ptr
, ""));
3485 const struct lp_build_tgsi_action
* action
,
3486 struct lp_build_tgsi_context
* bld_base
,
3487 struct lp_build_emit_data
* emit_data
)
3489 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3491 if (bld
->gs_iface
->end_primitive
) {
3492 LLVMValueRef mask
= mask_vec(bld_base
);
3493 end_primitive_masked(bld_base
, mask
);
3499 const struct lp_build_tgsi_action
* action
,
3500 struct lp_build_tgsi_context
* bld_base
,
3501 struct lp_build_emit_data
* emit_data
)
3503 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3505 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
3511 const struct lp_build_tgsi_action
* action
,
3512 struct lp_build_tgsi_context
* bld_base
,
3513 struct lp_build_emit_data
* emit_data
)
3515 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3517 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
3522 const struct lp_build_tgsi_action
* action
,
3523 struct lp_build_tgsi_context
* bld_base
,
3524 struct lp_build_emit_data
* emit_data
)
3526 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3528 lp_exec_break(&bld
->exec_mask
, bld_base
);
3533 const struct lp_build_tgsi_action
* action
,
3534 struct lp_build_tgsi_context
* bld_base
,
3535 struct lp_build_emit_data
* emit_data
)
3538 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3540 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
3541 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
3542 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3547 const struct lp_build_tgsi_action
* action
,
3548 struct lp_build_tgsi_context
* bld_base
,
3549 struct lp_build_emit_data
* emit_data
)
3552 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3553 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3555 tmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3556 emit_data
->args
[0], uint_bld
->zero
);
3557 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3562 const struct lp_build_tgsi_action
* action
,
3563 struct lp_build_tgsi_context
* bld_base
,
3564 struct lp_build_emit_data
* emit_data
)
3566 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3568 lp_exec_case(&bld
->exec_mask
, emit_data
->args
[0]);
3573 const struct lp_build_tgsi_action
* action
,
3574 struct lp_build_tgsi_context
* bld_base
,
3575 struct lp_build_emit_data
* emit_data
)
3577 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3579 lp_exec_default(&bld
->exec_mask
, bld_base
);
3584 const struct lp_build_tgsi_action
* action
,
3585 struct lp_build_tgsi_context
* bld_base
,
3586 struct lp_build_emit_data
* emit_data
)
3588 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3590 lp_exec_switch(&bld
->exec_mask
, emit_data
->args
[0]);
3595 const struct lp_build_tgsi_action
* action
,
3596 struct lp_build_tgsi_context
* bld_base
,
3597 struct lp_build_emit_data
* emit_data
)
3599 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3601 lp_exec_endswitch(&bld
->exec_mask
, bld_base
);
3606 const struct lp_build_tgsi_action
* action
,
3607 struct lp_build_tgsi_context
* bld_base
,
3608 struct lp_build_emit_data
* emit_data
)
3610 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3612 lp_exec_bgnloop(&bld
->exec_mask
);
3617 const struct lp_build_tgsi_action
* action
,
3618 struct lp_build_tgsi_context
* bld_base
,
3619 struct lp_build_emit_data
* emit_data
)
3621 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3623 lp_exec_mask_bgnsub(&bld
->exec_mask
);
3628 const struct lp_build_tgsi_action
* action
,
3629 struct lp_build_tgsi_context
* bld_base
,
3630 struct lp_build_emit_data
* emit_data
)
3632 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3634 lp_exec_mask_cond_invert(&bld
->exec_mask
);
3639 const struct lp_build_tgsi_action
* action
,
3640 struct lp_build_tgsi_context
* bld_base
,
3641 struct lp_build_emit_data
* emit_data
)
3643 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3645 lp_exec_mask_cond_pop(&bld
->exec_mask
);
3650 const struct lp_build_tgsi_action
* action
,
3651 struct lp_build_tgsi_context
* bld_base
,
3652 struct lp_build_emit_data
* emit_data
)
3654 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3656 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
3661 const struct lp_build_tgsi_action
* action
,
3662 struct lp_build_tgsi_context
* bld_base
,
3663 struct lp_build_emit_data
* emit_data
)
3665 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3667 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
3672 const struct lp_build_tgsi_action
* action
,
3673 struct lp_build_tgsi_context
* bld_base
,
3674 struct lp_build_emit_data
* emit_data
)
3676 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3678 lp_exec_continue(&bld
->exec_mask
);
3681 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
3683 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3684 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3686 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
3687 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4;
3688 bld
->temps_array
= lp_build_alloca_undef(gallivm
,
3689 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
3693 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
3694 LLVMValueRef array_size
=
3695 lp_build_const_int32(gallivm
,
3696 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
3697 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
3698 bld_base
->base
.vec_type
, array_size
,
3702 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3703 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_IMMEDIATE
] * 4 + 4;
3704 bld
->imms_array
= lp_build_alloca_undef(gallivm
,
3705 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
3709 /* If we have indirect addressing in inputs we need to copy them into
3710 * our alloca array to be able to iterate over them */
3711 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
) && !bld
->gs_iface
) {
3712 unsigned index
, chan
;
3713 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
3714 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
3715 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
3716 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
3717 vec_type
, array_size
,
3720 assert(bld_base
->info
->num_inputs
3721 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
3723 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
3724 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
3725 LLVMValueRef lindex
=
3726 lp_build_const_int32(gallivm
, index
* 4 + chan
);
3727 LLVMValueRef input_ptr
=
3728 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
3730 LLVMValueRef value
= bld
->inputs
[index
][chan
];
3732 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
3737 if (bld
->gs_iface
) {
3738 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
3739 bld
->emitted_prims_vec_ptr
=
3740 lp_build_alloca(gallivm
,
3742 "emitted_prims_ptr");
3743 bld
->emitted_vertices_vec_ptr
=
3744 lp_build_alloca(gallivm
,
3746 "emitted_vertices_ptr");
3747 bld
->total_emitted_vertices_vec_ptr
=
3748 lp_build_alloca(gallivm
,
3750 "total_emitted_vertices_ptr");
3752 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3753 bld
->emitted_prims_vec_ptr
);
3754 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3755 bld
->emitted_vertices_vec_ptr
);
3756 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3757 bld
->total_emitted_vertices_vec_ptr
);
3760 if (DEBUG_EXECUTION
) {
3761 lp_build_printf(gallivm
, "\n");
3762 emit_dump_file(bld
, TGSI_FILE_CONSTANT
);
3764 emit_dump_file(bld
, TGSI_FILE_INPUT
);
3768 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
3770 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3771 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3773 if (DEBUG_EXECUTION
) {
3776 emit_dump_file(bld
, TGSI_FILE_TEMPORARY
);
3778 emit_dump_file(bld
, TGSI_FILE_OUTPUT
);
3779 lp_build_printf(bld_base
->base
.gallivm
, "\n");
3782 /* If we have indirect addressing in outputs we need to copy our alloca array
3783 * to the outputs slots specified by the caller */
3784 if (bld
->gs_iface
) {
3785 LLVMValueRef total_emitted_vertices_vec
;
3786 LLVMValueRef emitted_prims_vec
;
3787 /* implicit end_primitives, needed in case there are any unflushed
3788 vertices in the cache. Note must not call end_primitive here
3789 since the exec_mask is not valid at this point. */
3790 end_primitive_masked(bld_base
, lp_build_mask_value(bld
->mask
));
3792 total_emitted_vertices_vec
=
3793 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3795 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3797 bld
->gs_iface
->gs_epilogue(bld
->gs_iface
,
3799 total_emitted_vertices_vec
,
3802 gather_outputs(bld
);
3807 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
3808 const struct tgsi_token
*tokens
,
3809 struct lp_type type
,
3810 struct lp_build_mask_context
*mask
,
3811 LLVMValueRef consts_ptr
,
3812 LLVMValueRef const_sizes_ptr
,
3813 const struct lp_bld_tgsi_system_values
*system_values
,
3814 const LLVMValueRef (*inputs
)[TGSI_NUM_CHANNELS
],
3815 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
],
3816 LLVMValueRef context_ptr
,
3817 LLVMValueRef thread_data_ptr
,
3818 const struct lp_build_sampler_soa
*sampler
,
3819 const struct tgsi_shader_info
*info
,
3820 const struct lp_build_tgsi_gs_iface
*gs_iface
)
3822 struct lp_build_tgsi_soa_context bld
;
3824 struct lp_type res_type
;
3826 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
3827 memset(&res_type
, 0, sizeof res_type
);
3828 res_type
.width
= type
.width
;
3829 res_type
.length
= type
.length
;
3832 /* Setup build context */
3833 memset(&bld
, 0, sizeof bld
);
3834 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
3835 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
3836 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
3837 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
3839 struct lp_type dbl_type
;
3841 dbl_type
.width
*= 2;
3842 lp_build_context_init(&bld
.bld_base
.dbl_bld
, gallivm
, dbl_type
);
3845 struct lp_type uint64_type
;
3846 uint64_type
= lp_uint_type(type
);
3847 uint64_type
.width
*= 2;
3848 lp_build_context_init(&bld
.bld_base
.uint64_bld
, gallivm
, uint64_type
);
3851 struct lp_type int64_type
;
3852 int64_type
= lp_int_type(type
);
3853 int64_type
.width
*= 2;
3854 lp_build_context_init(&bld
.bld_base
.int64_bld
, gallivm
, int64_type
);
3857 bld
.inputs
= inputs
;
3858 bld
.outputs
= outputs
;
3859 bld
.consts_ptr
= consts_ptr
;
3860 bld
.const_sizes_ptr
= const_sizes_ptr
;
3861 bld
.sampler
= sampler
;
3862 bld
.bld_base
.info
= info
;
3863 bld
.indirect_files
= info
->indirect_files
;
3864 bld
.context_ptr
= context_ptr
;
3865 bld
.thread_data_ptr
= thread_data_ptr
;
3868 * If the number of temporaries is rather large then we just
3869 * allocate them as an array right from the start and treat
3870 * like indirect temporaries.
3872 if (info
->file_max
[TGSI_FILE_TEMPORARY
] >= LP_MAX_INLINED_TEMPS
) {
3873 bld
.indirect_files
|= (1 << TGSI_FILE_TEMPORARY
);
3876 * For performance reason immediates are always backed in a static
3877 * array, but if their number is too great, we have to use just
3878 * a dynamically allocated array.
3880 bld
.use_immediates_array
=
3881 (info
->file_max
[TGSI_FILE_IMMEDIATE
] >= LP_MAX_INLINED_IMMEDIATES
);
3882 if (bld
.use_immediates_array
) {
3883 bld
.indirect_files
|= (1 << TGSI_FILE_IMMEDIATE
);
3887 bld
.bld_base
.soa
= TRUE
;
3888 bld
.bld_base
.emit_debug
= emit_debug
;
3889 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
3890 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
3891 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
3892 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
3893 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
3894 bld
.bld_base
.emit_store
= emit_store
;
3896 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
3897 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
3899 bld
.bld_base
.emit_prologue
= emit_prologue
;
3900 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
3902 /* Set opcode actions */
3903 lp_set_default_actions_cpu(&bld
.bld_base
);
3905 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
3906 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
3907 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
3908 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
3909 bld
.bld_base
.op_actions
[TGSI_OPCODE_CASE
].emit
= case_emit
;
3910 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
3911 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
3912 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
3913 bld
.bld_base
.op_actions
[TGSI_OPCODE_DEFAULT
].emit
= default_emit
;
3914 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
3915 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
3916 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
3917 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
3918 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSWITCH
].emit
= endswitch_emit
;
3919 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
3920 bld
.bld_base
.op_actions
[TGSI_OPCODE_UIF
].emit
= uif_emit
;
3921 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL_IF
].emit
= kill_if_emit
;
3922 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL
].emit
= kill_emit
;
3923 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
3924 bld
.bld_base
.op_actions
[TGSI_OPCODE_SWITCH
].emit
= switch_emit
;
3925 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
3926 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
3927 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
3928 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
3929 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
3930 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
3931 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
3932 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX2
].emit
= tex2_emit
;
3933 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB2
].emit
= txb2_emit
;
3934 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL2
].emit
= txl2_emit
;
3935 bld
.bld_base
.op_actions
[TGSI_OPCODE_TG4
].emit
= tg4_emit
;
3936 bld
.bld_base
.op_actions
[TGSI_OPCODE_LODQ
].emit
= lodq_emit
;
3937 /* DX10 sampling ops */
3938 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE
].emit
= sample_emit
;
3939 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_B
].emit
= sample_b_emit
;
3940 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C
].emit
= sample_c_emit
;
3941 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C_LZ
].emit
= sample_c_lz_emit
;
3942 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_D
].emit
= sample_d_emit
;
3943 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I
].emit
= sample_i_emit
;
3944 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I_MS
].emit
= sample_i_emit
;
3945 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_L
].emit
= sample_l_emit
;
3946 bld
.bld_base
.op_actions
[TGSI_OPCODE_GATHER4
].emit
= gather4_emit
;
3947 bld
.bld_base
.op_actions
[TGSI_OPCODE_SVIEWINFO
].emit
= sviewinfo_emit
;
3948 bld
.bld_base
.op_actions
[TGSI_OPCODE_LOD
].emit
= lod_emit
;
3952 /* There's no specific value for this because it should always
3953 * be set, but apps using ext_geometry_shader4 quite often
3954 * were forgetting so we're using MAX_VERTEX_VARYING from
3955 * that spec even though we could debug_assert if it's not
3956 * set, but that's a lot uglier. */
3957 uint max_output_vertices
;
3959 /* inputs are always indirect with gs */
3960 bld
.indirect_files
|= (1 << TGSI_FILE_INPUT
);
3961 bld
.gs_iface
= gs_iface
;
3962 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_gs_input
;
3963 bld
.bld_base
.op_actions
[TGSI_OPCODE_EMIT
].emit
= emit_vertex
;
3964 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDPRIM
].emit
= end_primitive
;
3966 max_output_vertices
=
3967 info
->properties
[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
];
3968 if (!max_output_vertices
)
3969 max_output_vertices
= 32;
3971 bld
.max_output_vertices_vec
=
3972 lp_build_const_int_vec(gallivm
, bld
.bld_base
.int_bld
.type
,
3973 max_output_vertices
);
3976 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.int_bld
);
3978 bld
.system_values
= *system_values
;
3980 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
3983 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
3984 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
3985 debug_printf("11111111111111111111111111111 \n");
3986 tgsi_dump(tokens
, 0);
3987 lp_debug_dump_value(function
);
3988 debug_printf("2222222222222222222222222222 \n");
3992 LLVMModuleRef module
= LLVMGetGlobalParent(
3993 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
3994 LLVMDumpModule(module
);
3997 lp_exec_mask_fini(&bld
.exec_mask
);