1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_prim.h"
45 #include "tgsi/tgsi_dump.h"
46 #include "tgsi/tgsi_exec.h"
47 #include "tgsi/tgsi_info.h"
48 #include "tgsi/tgsi_parse.h"
49 #include "tgsi/tgsi_util.h"
50 #include "tgsi/tgsi_scan.h"
51 #include "tgsi/tgsi_strings.h"
52 #include "lp_bld_tgsi_action.h"
53 #include "lp_bld_type.h"
54 #include "lp_bld_const.h"
55 #include "lp_bld_arit.h"
56 #include "lp_bld_bitarit.h"
57 #include "lp_bld_gather.h"
58 #include "lp_bld_init.h"
59 #include "lp_bld_logic.h"
60 #include "lp_bld_swizzle.h"
61 #include "lp_bld_flow.h"
62 #include "lp_bld_quad.h"
63 #include "lp_bld_tgsi.h"
64 #include "lp_bld_limits.h"
65 #include "lp_bld_debug.h"
66 #include "lp_bld_printf.h"
67 #include "lp_bld_sample.h"
68 #include "lp_bld_struct.h"
70 /* SM 4.0 says that subroutines can nest 32 deep and
71 * we need one more for our main function */
72 #define LP_MAX_NUM_FUNCS 33
74 #define DUMP_GS_EMITS 0
77 * If non-zero, the generated LLVM IR will print intermediate results on every TGSI
81 * - take execution masks in consideration
82 * - debug control-flow instructions
84 #define DEBUG_EXECUTION 0
88 * Emit code to print a register value.
91 emit_dump_reg(struct gallivm_state
*gallivm
,
99 util_snprintf(buf
, sizeof buf
, " %s[%u].%c = ",
100 tgsi_file_name(file
),
101 index
, "xyzw"[chan
]);
103 lp_build_print_value(gallivm
, buf
, value
);
107 * Return the context for the current function.
108 * (always 'main', if shader doesn't do any function calls)
110 static inline struct function_ctx
*
111 func_ctx(struct lp_exec_mask
*mask
)
113 assert(mask
->function_stack_size
> 0);
114 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
115 return &mask
->function_stack
[mask
->function_stack_size
- 1];
119 * Returns true if we're in a loop.
120 * It's global, meaning that it returns true even if there's
121 * no loop inside the current function, but we were inside
122 * a loop inside another function, from which this one was called.
124 static inline boolean
125 mask_has_loop(struct lp_exec_mask
*mask
)
128 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
129 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
130 if (ctx
->loop_stack_size
> 0)
137 mask_vec(struct lp_build_tgsi_context
*bld_base
)
139 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
140 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
141 struct lp_exec_mask
*exec_mask
= &bld
->exec_mask
;
143 if (!exec_mask
->has_mask
) {
144 return lp_build_mask_value(bld
->mask
);
146 return LLVMBuildAnd(builder
, lp_build_mask_value(bld
->mask
),
147 exec_mask
->exec_mask
, "");
152 * Returns true if we're inside a switch statement.
153 * It's global, meaning that it returns true even if there's
154 * no switch in the current function, but we were inside
155 * a switch inside another function, from which this one was called.
157 static inline boolean
158 mask_has_switch(struct lp_exec_mask
*mask
)
161 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
162 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
163 if (ctx
->switch_stack_size
> 0)
170 * Returns true if we're inside a conditional.
171 * It's global, meaning that it returns true even if there's
172 * no conditional in the current function, but we were inside
173 * a conditional inside another function, from which this one was called.
175 static inline boolean
176 mask_has_cond(struct lp_exec_mask
*mask
)
179 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
180 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
181 if (ctx
->cond_stack_size
> 0)
189 * Initialize a function context at the specified index.
192 lp_exec_mask_function_init(struct lp_exec_mask
*mask
, int function_idx
)
194 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
195 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
196 struct function_ctx
*ctx
= &mask
->function_stack
[function_idx
];
198 ctx
->cond_stack_size
= 0;
199 ctx
->loop_stack_size
= 0;
200 ctx
->switch_stack_size
= 0;
202 if (function_idx
== 0) {
203 ctx
->ret_mask
= mask
->ret_mask
;
206 ctx
->loop_limiter
= lp_build_alloca(mask
->bld
->gallivm
,
207 int_type
, "looplimiter");
210 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
214 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
217 mask
->has_mask
= FALSE
;
218 mask
->ret_in_main
= FALSE
;
219 /* For the main function */
220 mask
->function_stack_size
= 1;
222 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
223 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
=
224 mask
->cond_mask
= mask
->switch_mask
=
225 LLVMConstAllOnes(mask
->int_vec_type
);
227 mask
->function_stack
= CALLOC(LP_MAX_NUM_FUNCS
,
228 sizeof(mask
->function_stack
[0]));
229 lp_exec_mask_function_init(mask
, 0);
233 lp_exec_mask_fini(struct lp_exec_mask
*mask
)
235 FREE(mask
->function_stack
);
238 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
240 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
241 boolean has_loop_mask
= mask_has_loop(mask
);
242 boolean has_cond_mask
= mask_has_cond(mask
);
243 boolean has_switch_mask
= mask_has_switch(mask
);
244 boolean has_ret_mask
= mask
->function_stack_size
> 1 ||
248 /*for loops we need to update the entire mask at runtime */
250 assert(mask
->break_mask
);
251 tmp
= LLVMBuildAnd(builder
,
255 mask
->exec_mask
= LLVMBuildAnd(builder
,
260 mask
->exec_mask
= mask
->cond_mask
;
262 if (has_switch_mask
) {
263 mask
->exec_mask
= LLVMBuildAnd(builder
,
270 mask
->exec_mask
= LLVMBuildAnd(builder
,
276 mask
->has_mask
= (has_cond_mask
||
282 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
285 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
286 struct function_ctx
*ctx
= func_ctx(mask
);
288 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
) {
289 ctx
->cond_stack_size
++;
292 if (ctx
->cond_stack_size
== 0 && mask
->function_stack_size
== 1) {
293 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
295 ctx
->cond_stack
[ctx
->cond_stack_size
++] = mask
->cond_mask
;
296 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
297 mask
->cond_mask
= LLVMBuildAnd(builder
,
301 lp_exec_mask_update(mask
);
304 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
306 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
307 struct function_ctx
*ctx
= func_ctx(mask
);
308 LLVMValueRef prev_mask
;
309 LLVMValueRef inv_mask
;
311 assert(ctx
->cond_stack_size
);
312 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
314 prev_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
- 1];
315 if (ctx
->cond_stack_size
== 1 && mask
->function_stack_size
== 1) {
316 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
319 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
321 mask
->cond_mask
= LLVMBuildAnd(builder
,
324 lp_exec_mask_update(mask
);
327 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
329 struct function_ctx
*ctx
= func_ctx(mask
);
330 assert(ctx
->cond_stack_size
);
331 --ctx
->cond_stack_size
;
332 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
334 mask
->cond_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
];
335 lp_exec_mask_update(mask
);
338 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
340 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
341 struct function_ctx
*ctx
= func_ctx(mask
);
343 if (ctx
->loop_stack_size
>= LP_MAX_TGSI_NESTING
) {
344 ++ctx
->loop_stack_size
;
348 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
350 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_LOOP
;
352 ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
= ctx
->loop_block
;
353 ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
354 ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
= mask
->break_mask
;
355 ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
= ctx
->break_var
;
356 ++ctx
->loop_stack_size
;
358 ctx
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
359 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
361 ctx
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
363 LLVMBuildBr(builder
, ctx
->loop_block
);
364 LLVMPositionBuilderAtEnd(builder
, ctx
->loop_block
);
366 mask
->break_mask
= LLVMBuildLoad(builder
, ctx
->break_var
, "");
368 lp_exec_mask_update(mask
);
371 static void lp_exec_break(struct lp_exec_mask
*mask
,
372 struct lp_build_tgsi_context
* bld_base
)
374 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
375 struct function_ctx
*ctx
= func_ctx(mask
);
377 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
378 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
382 mask
->break_mask
= LLVMBuildAnd(builder
,
384 exec_mask
, "break_full");
387 enum tgsi_opcode opcode
=
388 bld_base
->instructions
[bld_base
->pc
+ 1].Instruction
.Opcode
;
389 boolean break_always
= (opcode
== TGSI_OPCODE_ENDSWITCH
||
390 opcode
== TGSI_OPCODE_CASE
);
393 if (ctx
->switch_in_default
) {
395 * stop default execution but only if this is an unconditional switch.
396 * (The condition here is not perfect since dead code after break is
397 * allowed but should be sufficient since false negatives are just
398 * unoptimized - so we don't have to pre-evaluate that).
400 if(break_always
&& ctx
->switch_pc
) {
401 bld_base
->pc
= ctx
->switch_pc
;
407 mask
->switch_mask
= LLVMConstNull(mask
->bld
->int_vec_type
);
410 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
413 mask
->switch_mask
= LLVMBuildAnd(builder
,
415 exec_mask
, "break_switch");
419 lp_exec_mask_update(mask
);
422 static void lp_exec_continue(struct lp_exec_mask
*mask
)
424 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
425 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
429 mask
->cont_mask
= LLVMBuildAnd(builder
,
433 lp_exec_mask_update(mask
);
437 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
438 struct lp_exec_mask
*mask
)
440 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
441 struct function_ctx
*ctx
= func_ctx(mask
);
442 LLVMBasicBlockRef endloop
;
443 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
444 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
445 mask
->bld
->type
.width
*
446 mask
->bld
->type
.length
);
447 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
449 assert(mask
->break_mask
);
452 assert(ctx
->loop_stack_size
);
453 if (ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
454 --ctx
->loop_stack_size
;
459 * Restore the cont_mask, but don't pop
461 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
- 1].cont_mask
;
462 lp_exec_mask_update(mask
);
465 * Unlike the continue mask, the break_mask must be preserved across loop
468 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
470 /* Decrement the loop limiter */
471 limiter
= LLVMBuildLoad(builder
, ctx
->loop_limiter
, "");
473 limiter
= LLVMBuildSub(
476 LLVMConstInt(int_type
, 1, false),
479 LLVMBuildStore(builder
, limiter
, ctx
->loop_limiter
);
481 /* i1cond = (mask != 0) */
482 i1cond
= LLVMBuildICmp(
485 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
486 LLVMConstNull(reg_type
), "i1cond");
488 /* i2cond = (looplimiter > 0) */
489 i2cond
= LLVMBuildICmp(
493 LLVMConstNull(int_type
), "i2cond");
495 /* if( i1cond && i2cond ) */
496 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
498 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
500 LLVMBuildCondBr(builder
,
501 icond
, ctx
->loop_block
, endloop
);
503 LLVMPositionBuilderAtEnd(builder
, endloop
);
505 assert(ctx
->loop_stack_size
);
506 --ctx
->loop_stack_size
;
507 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
;
508 mask
->break_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
;
509 ctx
->loop_block
= ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
;
510 ctx
->break_var
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
;
511 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+
512 ctx
->switch_stack_size
];
514 lp_exec_mask_update(mask
);
517 static void lp_exec_switch(struct lp_exec_mask
*mask
,
518 LLVMValueRef switchval
)
520 struct function_ctx
*ctx
= func_ctx(mask
);
522 if (ctx
->switch_stack_size
>= LP_MAX_TGSI_NESTING
||
523 ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
524 ctx
->switch_stack_size
++;
528 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
530 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_SWITCH
;
532 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
= mask
->switch_mask
;
533 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
= ctx
->switch_val
;
534 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
= ctx
->switch_mask_default
;
535 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
= ctx
->switch_in_default
;
536 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
= ctx
->switch_pc
;
537 ctx
->switch_stack_size
++;
539 mask
->switch_mask
= LLVMConstNull(mask
->int_vec_type
);
540 ctx
->switch_val
= switchval
;
541 ctx
->switch_mask_default
= LLVMConstNull(mask
->int_vec_type
);
542 ctx
->switch_in_default
= false;
545 lp_exec_mask_update(mask
);
548 static void lp_exec_endswitch(struct lp_exec_mask
*mask
,
549 struct lp_build_tgsi_context
* bld_base
)
551 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
552 struct function_ctx
*ctx
= func_ctx(mask
);
554 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
555 ctx
->switch_stack_size
--;
559 /* check if there's deferred default if so do it now */
560 if (ctx
->switch_pc
&& !ctx
->switch_in_default
) {
561 LLVMValueRef prevmask
, defaultmask
;
563 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
564 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
565 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
566 ctx
->switch_in_default
= true;
568 lp_exec_mask_update(mask
);
570 assert(bld_base
->instructions
[ctx
->switch_pc
- 1].Instruction
.Opcode
==
571 TGSI_OPCODE_DEFAULT
);
573 tmp_pc
= bld_base
->pc
;
574 bld_base
->pc
= ctx
->switch_pc
;
576 * re-purpose switch_pc to point to here again, since we stop execution of
577 * the deferred default after next break.
579 ctx
->switch_pc
= tmp_pc
- 1;
584 else if (ctx
->switch_pc
&& ctx
->switch_in_default
) {
585 assert(bld_base
->pc
== ctx
->switch_pc
+ 1);
588 ctx
->switch_stack_size
--;
589 mask
->switch_mask
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
;
590 ctx
->switch_val
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
;
591 ctx
->switch_mask_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
;
592 ctx
->switch_in_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
;
593 ctx
->switch_pc
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
;
595 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
];
597 lp_exec_mask_update(mask
);
600 static void lp_exec_case(struct lp_exec_mask
*mask
,
601 LLVMValueRef caseval
)
603 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
604 struct function_ctx
*ctx
= func_ctx(mask
);
606 LLVMValueRef casemask
, prevmask
;
608 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
612 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
613 if (!ctx
->switch_in_default
) {
614 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
615 casemask
= lp_build_cmp(mask
->bld
, PIPE_FUNC_EQUAL
, caseval
, ctx
->switch_val
);
616 ctx
->switch_mask_default
= LLVMBuildOr(builder
, casemask
,
617 ctx
->switch_mask_default
, "sw_default_mask");
618 casemask
= LLVMBuildOr(builder
, casemask
, mask
->switch_mask
, "");
619 mask
->switch_mask
= LLVMBuildAnd(builder
, casemask
, prevmask
, "sw_mask");
621 lp_exec_mask_update(mask
);
626 * Analyse default statement in a switch.
627 * \return true if default is last statement, false otherwise
628 * \param default_pc_start contains pc of instruction to jump to
629 * if default wasn't last but there's no
630 * fallthrough into default.
632 static boolean
default_analyse_is_last(struct lp_exec_mask
*mask
,
633 struct lp_build_tgsi_context
* bld_base
,
634 int *default_pc_start
)
636 unsigned pc
= bld_base
->pc
;
637 struct function_ctx
*ctx
= func_ctx(mask
);
638 int curr_switch_stack
= ctx
->switch_stack_size
;
640 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
644 /* skip over case statements which are together with default */
645 while (bld_base
->instructions
[pc
].Instruction
.Opcode
== TGSI_OPCODE_CASE
) {
649 while (pc
!= ~0u && pc
< bld_base
->num_instructions
) {
650 enum tgsi_opcode opcode
= bld_base
->instructions
[pc
].Instruction
.Opcode
;
652 case TGSI_OPCODE_CASE
:
653 if (curr_switch_stack
== ctx
->switch_stack_size
) {
654 *default_pc_start
= pc
- 1;
658 case TGSI_OPCODE_SWITCH
:
661 case TGSI_OPCODE_ENDSWITCH
:
662 if (curr_switch_stack
== ctx
->switch_stack_size
) {
663 *default_pc_start
= pc
- 1;
673 /* should never arrive here */
678 static void lp_exec_default(struct lp_exec_mask
*mask
,
679 struct lp_build_tgsi_context
* bld_base
)
681 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
682 struct function_ctx
*ctx
= func_ctx(mask
);
685 boolean default_is_last
;
687 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
692 * This is a messy opcode, because it may not be always at the end and
693 * there can be fallthrough in and out of it.
696 default_is_last
= default_analyse_is_last(mask
, bld_base
, &default_exec_pc
);
698 * If it is last statement in switch (note that case statements appearing
699 * "at the same time" as default don't change that) everything is just fine,
700 * update switch mask and go on. This means we can handle default with
701 * fallthrough INTO it without overhead, if it is last.
703 if (default_is_last
) {
704 LLVMValueRef prevmask
, defaultmask
;
705 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
706 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
707 defaultmask
= LLVMBuildOr(builder
, defaultmask
, mask
->switch_mask
, "");
708 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
709 ctx
->switch_in_default
= true;
711 lp_exec_mask_update(mask
);
715 * Technically, "case" immediately before default isn't really a
716 * fallthrough, however we still have to count them as such as we
717 * already have updated the masks.
718 * If that happens in practice could add a switch optimizer pass
719 * which just gets rid of all case statements appearing together with
720 * default (or could do switch analysis at switch start time instead).
722 enum tgsi_opcode opcode
=
723 bld_base
->instructions
[bld_base
->pc
- 1].Instruction
.Opcode
;
724 boolean ft_into
= (opcode
!= TGSI_OPCODE_BRK
&&
725 opcode
!= TGSI_OPCODE_SWITCH
);
727 * If it is not last statement and there was no fallthrough into it,
728 * we record the PC and continue execution at next case (again, those
729 * case encountered at the same time don't count). At endswitch
730 * time, we update switchmask, and go back executing the code we skipped
731 * until the next break (possibly re-executing some code with changed mask
732 * if there was a fallthrough out of default).
733 * Finally, if it is not last statement and there was a fallthrough into it,
734 * do the same as with the former case, except instead of skipping the code
735 * just execute it without updating the mask, then go back and re-execute.
737 ctx
->switch_pc
= bld_base
->pc
;
739 bld_base
->pc
= default_exec_pc
;
745 /* stores val into an address pointed to by dst_ptr.
746 * mask->exec_mask is used to figure out which bits of val
747 * should be stored into the address
748 * (0 means don't store this bit, 1 means do store).
750 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
751 struct lp_build_context
*bld_store
,
753 LLVMValueRef dst_ptr
)
755 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
756 LLVMValueRef exec_mask
= mask
->has_mask
? mask
->exec_mask
: NULL
;
758 assert(lp_check_value(bld_store
->type
, val
));
759 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr
)) == LLVMPointerTypeKind
);
760 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr
)) == LLVMTypeOf(val
) ||
761 LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(dst_ptr
))) == LLVMArrayTypeKind
);
764 LLVMValueRef res
, dst
;
766 dst
= LLVMBuildLoad(builder
, dst_ptr
, "");
767 res
= lp_build_select(bld_store
, exec_mask
, val
, dst
);
768 LLVMBuildStore(builder
, res
, dst_ptr
);
770 LLVMBuildStore(builder
, val
, dst_ptr
);
773 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
777 if (mask
->function_stack_size
>= LP_MAX_NUM_FUNCS
) {
781 lp_exec_mask_function_init(mask
, mask
->function_stack_size
);
782 mask
->function_stack
[mask
->function_stack_size
].pc
= *pc
;
783 mask
->function_stack
[mask
->function_stack_size
].ret_mask
= mask
->ret_mask
;
784 mask
->function_stack_size
++;
788 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
790 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
791 struct function_ctx
*ctx
= func_ctx(mask
);
792 LLVMValueRef exec_mask
;
794 if (ctx
->cond_stack_size
== 0 &&
795 ctx
->loop_stack_size
== 0 &&
796 ctx
->switch_stack_size
== 0 &&
797 mask
->function_stack_size
== 1) {
798 /* returning from main() */
803 if (mask
->function_stack_size
== 1) {
805 * This requires special handling since we need to ensure
806 * we don't drop the mask even if we have no call stack
807 * (e.g. after a ret in a if clause after the endif)
809 mask
->ret_in_main
= TRUE
;
812 exec_mask
= LLVMBuildNot(builder
,
816 mask
->ret_mask
= LLVMBuildAnd(builder
,
818 exec_mask
, "ret_full");
820 lp_exec_mask_update(mask
);
/* BGNSUB: no mask work needed at subroutine entry; CAL does the setup. */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
827 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
829 struct function_ctx
*ctx
;
831 assert(mask
->function_stack_size
> 1);
832 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
834 ctx
= func_ctx(mask
);
835 mask
->function_stack_size
--;
838 mask
->ret_mask
= ctx
->ret_mask
;
840 lp_exec_mask_update(mask
);
845 get_file_ptr(struct lp_build_tgsi_soa_context
*bld
,
850 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
851 LLVMValueRef (*array_of_vars
)[TGSI_NUM_CHANNELS
];
852 LLVMValueRef var_of_array
;
855 case TGSI_FILE_TEMPORARY
:
856 array_of_vars
= bld
->temps
;
857 var_of_array
= bld
->temps_array
;
859 case TGSI_FILE_OUTPUT
:
860 array_of_vars
= bld
->outputs
;
861 var_of_array
= bld
->outputs_array
;
870 if (bld
->indirect_files
& (1 << file
)) {
871 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
872 if (LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(var_of_array
))) == LLVMArrayTypeKind
) {
874 gep
[0] = lp_build_const_int32(bld
->bld_base
.base
.gallivm
, 0);
876 return LLVMBuildGEP(builder
, var_of_array
, gep
, 2, "");
878 return LLVMBuildGEP(builder
, var_of_array
, &lindex
, 1, "");
882 assert(index
<= bld
->bld_base
.info
->file_max
[file
]);
883 return array_of_vars
[index
][chan
];
889 * Return pointer to a temporary register channel (src or dest).
890 * Note that indirect addressing cannot be handled here.
891 * \param index which temporary register
892 * \param chan which channel of the temp register.
895 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
899 return get_file_ptr(bld
, TGSI_FILE_TEMPORARY
, index
, chan
);
903 * Return pointer to a output register channel (src or dest).
904 * Note that indirect addressing cannot be handled here.
905 * \param index which output register
906 * \param chan which channel of the output register.
909 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
913 return get_file_ptr(bld
, TGSI_FILE_OUTPUT
, index
, chan
);
917 * If we have indirect addressing in outputs copy our alloca array
918 * to the outputs slots specified by the caller to make sure
919 * our outputs are delivered consistently via the same interface.
922 gather_outputs(struct lp_build_tgsi_soa_context
* bld
)
924 if ((bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
925 unsigned index
, chan
;
926 assert(bld
->bld_base
.info
->num_outputs
<=
927 bld
->bld_base
.info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
928 for (index
= 0; index
< bld
->bld_base
.info
->num_outputs
; ++index
) {
929 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
930 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
938 * XXX the lp_build_gather() function should be capable of doing this
939 * with a little work.
942 build_gather(struct lp_build_tgsi_context
*bld_base
,
943 LLVMValueRef base_ptr
,
944 LLVMValueRef indexes
,
945 LLVMValueRef overflow_mask
,
946 LLVMValueRef indexes2
)
948 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
949 LLVMBuilderRef builder
= gallivm
->builder
;
950 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
951 struct lp_build_context
*bld
= &bld_base
->base
;
956 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
960 * overflow_mask is a vector telling us which channels
961 * in the vector overflowed. We use the overflow behavior for
962 * constant buffers which is defined as:
963 * Out of bounds access to constant buffer returns 0 in all
964 * components. Out of bounds behavior is always with respect
965 * to the size of the buffer bound at that slot.
970 * We avoid per-element control flow here (also due to llvm going crazy,
971 * though I suspect it's better anyway since overflow is likely rare).
972 * Note that since we still fetch from buffers even if num_elements was
973 * zero (in this case we'll fetch from index zero) the jit func callers
974 * MUST provide valid fake constant buffers of size 4x32 (the values do
975 * not matter), otherwise we'd still need (not per element though)
978 indexes
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes
);
980 indexes2
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes2
);
984 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
986 for (i
= 0; i
< bld
->type
.length
* (indexes2
? 2 : 1); i
++) {
989 LLVMValueRef scalar_ptr
, scalar
;
991 di
= lp_build_const_int32(bld
->gallivm
, i
);
993 si
= lp_build_const_int32(bld
->gallivm
, i
>> 1);
997 if (indexes2
&& (i
& 1)) {
998 index
= LLVMBuildExtractElement(builder
,
1001 index
= LLVMBuildExtractElement(builder
,
1004 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
1005 &index
, 1, "gather_ptr");
1006 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1008 res
= LLVMBuildInsertElement(builder
, res
, scalar
, di
, "");
1011 if (overflow_mask
) {
1013 res
= LLVMBuildBitCast(builder
, res
, bld_base
->dbl_bld
.vec_type
, "");
1014 overflow_mask
= LLVMBuildSExt(builder
, overflow_mask
,
1015 bld_base
->dbl_bld
.int_vec_type
, "");
1016 res
= lp_build_select(&bld_base
->dbl_bld
, overflow_mask
,
1017 bld_base
->dbl_bld
.zero
, res
);
1019 res
= lp_build_select(bld
, overflow_mask
, bld
->zero
, res
);
1027 * Scatter/store vector.
1030 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
1031 LLVMValueRef base_ptr
,
1032 LLVMValueRef indexes
,
1033 LLVMValueRef values
,
1034 struct lp_exec_mask
*mask
)
1036 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1037 LLVMBuilderRef builder
= gallivm
->builder
;
1039 LLVMValueRef pred
= mask
->has_mask
? mask
->exec_mask
: NULL
;
1042 * Loop over elements of index_vec, store scalar value.
1044 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1045 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1046 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
1047 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
1048 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
1049 LLVMValueRef scalar_pred
= pred
?
1050 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
1053 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
1054 ii
, val
, index
, scalar_ptr
);
1057 LLVMValueRef real_val
, dst_val
;
1058 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1059 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
1060 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
1063 LLVMBuildStore(builder
, val
, scalar_ptr
);
1070 * Read the current value of the ADDR register, convert the floats to
1071 * ints, add the base index and return the vector of offsets.
1072 * The offsets will be used to index into the constant buffer or
1073 * temporary register file.
1076 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
1077 unsigned reg_file
, unsigned reg_index
,
1078 const struct tgsi_ind_register
*indirect_reg
,
1081 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1082 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
1083 /* always use X component of address register */
1084 unsigned swizzle
= indirect_reg
->Swizzle
;
1087 LLVMValueRef max_index
;
1090 assert(bld
->indirect_files
& (1 << reg_file
));
1092 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
1094 assert(swizzle
< 4);
1095 switch (indirect_reg
->File
) {
1096 case TGSI_FILE_ADDRESS
:
1097 rel
= LLVMBuildLoad(builder
,
1098 bld
->addr
[indirect_reg
->Index
][swizzle
],
1100 /* ADDR LLVM values already have LLVM integer type. */
1102 case TGSI_FILE_TEMPORARY
:
1103 rel
= lp_get_temp_ptr_soa(bld
, indirect_reg
->Index
, swizzle
);
1104 rel
= LLVMBuildLoad(builder
, rel
, "load temp reg");
1105 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1106 * value actually stored is expected to be an integer */
1107 rel
= LLVMBuildBitCast(builder
, rel
, uint_bld
->vec_type
, "");
1111 rel
= uint_bld
->zero
;
1114 index
= lp_build_add(uint_bld
, base
, rel
);
1117 * emit_fetch_constant handles constant buffer overflow so this code
1118 * is pointless for them.
1119 * Furthermore the D3D10 spec in section 6.5 says:
1120 * If the constant buffer bound to a slot is larger than the size
1121 * declared in the shader for that slot, implementations are allowed
1122 * to return incorrect data (not necessarily 0) for indices that are
1123 * larger than the declared size but smaller than the buffer size.
1125 if (reg_file
!= TGSI_FILE_CONSTANT
) {
1126 assert(index_limit
>= 0);
1127 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
1128 uint_bld
->type
, index_limit
);
1130 assert(!uint_bld
->type
.sign
);
1131 index
= lp_build_min(uint_bld
, index
, max_index
);
1137 static struct lp_build_context
*
1138 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
1139 enum tgsi_opcode_type stype
)
1141 struct lp_build_context
*bld_fetch
;
1144 case TGSI_TYPE_FLOAT
:
1145 case TGSI_TYPE_UNTYPED
:
1146 bld_fetch
= &bld_base
->base
;
1148 case TGSI_TYPE_UNSIGNED
:
1149 bld_fetch
= &bld_base
->uint_bld
;
1151 case TGSI_TYPE_SIGNED
:
1152 bld_fetch
= &bld_base
->int_bld
;
1154 case TGSI_TYPE_DOUBLE
:
1155 bld_fetch
= &bld_base
->dbl_bld
;
1157 case TGSI_TYPE_UNSIGNED64
:
1158 bld_fetch
= &bld_base
->uint64_bld
;
1160 case TGSI_TYPE_SIGNED64
:
1161 bld_fetch
= &bld_base
->int64_bld
;
1163 case TGSI_TYPE_VOID
:
1173 get_soa_array_offsets(struct lp_build_context
*uint_bld
,
1174 LLVMValueRef indirect_index
,
1175 unsigned chan_index
,
1176 boolean need_perelement_offset
)
1178 struct gallivm_state
*gallivm
= uint_bld
->gallivm
;
1179 LLVMValueRef chan_vec
=
1180 lp_build_const_int_vec(uint_bld
->gallivm
, uint_bld
->type
, chan_index
);
1181 LLVMValueRef length_vec
=
1182 lp_build_const_int_vec(gallivm
, uint_bld
->type
, uint_bld
->type
.length
);
1183 LLVMValueRef index_vec
;
1185 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1186 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1187 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1188 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1190 if (need_perelement_offset
) {
1191 LLVMValueRef pixel_offsets
;
1193 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1194 pixel_offsets
= uint_bld
->undef
;
1195 for (i
= 0; i
< uint_bld
->type
.length
; i
++) {
1196 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1197 pixel_offsets
= LLVMBuildInsertElement(gallivm
->builder
, pixel_offsets
,
1200 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1206 emit_fetch_constant(
1207 struct lp_build_tgsi_context
* bld_base
,
1208 const struct tgsi_full_src_register
* reg
,
1209 enum tgsi_opcode_type stype
,
1210 unsigned swizzle_in
)
1212 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1213 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1214 LLVMBuilderRef builder
= gallivm
->builder
;
1215 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
1216 unsigned dimension
= 0;
1217 LLVMValueRef consts_ptr
;
1218 LLVMValueRef num_consts
;
1220 unsigned swizzle
= swizzle_in
& 0xffff;
1222 /* XXX: Handle fetching xyzw components as a vector */
1223 assert(swizzle
!= ~0u);
1225 if (reg
->Register
.Dimension
) {
1226 assert(!reg
->Dimension
.Indirect
);
1227 dimension
= reg
->Dimension
.Index
;
1228 assert(dimension
< LP_MAX_TGSI_CONST_BUFFERS
);
1231 consts_ptr
= bld
->consts
[dimension
];
1232 num_consts
= bld
->consts_sizes
[dimension
];
1234 if (reg
->Register
.Indirect
) {
1235 LLVMValueRef indirect_index
;
1236 LLVMValueRef swizzle_vec
=
1237 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
1238 LLVMValueRef index_vec
; /* index into the const buffer */
1239 LLVMValueRef overflow_mask
;
1240 LLVMValueRef index_vec2
= NULL
;
1242 indirect_index
= get_indirect_index(bld
,
1244 reg
->Register
.Index
,
1246 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1248 /* All fetches are from the same constant buffer, so
1249 * we need to propagate the size to a vector to do a
1250 * vector comparison */
1251 num_consts
= lp_build_broadcast_scalar(uint_bld
, num_consts
);
1252 /* Construct a boolean vector telling us which channels
1253 * overflow the bound constant buffer */
1254 overflow_mask
= lp_build_compare(gallivm
, uint_bld
->type
, PIPE_FUNC_GEQUAL
,
1255 indirect_index
, num_consts
);
1257 /* index_vec = indirect_index * 4 + swizzle */
1258 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1259 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
1261 if (tgsi_type_is_64bit(stype
)) {
1262 LLVMValueRef swizzle_vec2
;
1263 swizzle_vec2
= lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle_in
>> 16);
1264 index_vec2
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1265 index_vec2
= lp_build_add(uint_bld
, index_vec2
, swizzle_vec2
);
1267 /* Gather values from the constant buffer */
1268 res
= build_gather(bld_base
, consts_ptr
, index_vec
, overflow_mask
, index_vec2
);
1271 LLVMValueRef index
; /* index into the const buffer */
1272 LLVMValueRef scalar
, scalar_ptr
;
1273 struct lp_build_context
*bld_broad
= &bld_base
->base
;
1274 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1276 scalar_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1279 if (tgsi_type_is_64bit(stype
) && ((swizzle_in
>> 16) != swizzle
+ 1)) {
1281 LLVMValueRef scalar2
, scalar2_ptr
;
1282 LLVMValueRef shuffles
[2];
1283 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1285 scalar2_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1288 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1289 scalar2
= LLVMBuildLoad(builder
, scalar2_ptr
, "");
1290 shuffles
[0] = lp_build_const_int32(gallivm
, 0);
1291 shuffles
[1] = lp_build_const_int32(gallivm
, 1);
1293 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
1294 res
= LLVMBuildInsertElement(builder
, res
, scalar
, shuffles
[0], "");
1295 res
= LLVMBuildInsertElement(builder
, res
, scalar2
, shuffles
[1], "");
1297 if (stype
== TGSI_TYPE_DOUBLE
) {
1298 LLVMTypeRef dptr_type
= LLVMPointerType(LLVMDoubleTypeInContext(gallivm
->context
), 0);
1299 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, dptr_type
, "");
1300 bld_broad
= &bld_base
->dbl_bld
;
1301 } else if (stype
== TGSI_TYPE_UNSIGNED64
) {
1302 LLVMTypeRef u64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1303 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, u64ptr_type
, "");
1304 bld_broad
= &bld_base
->uint64_bld
;
1305 } else if (stype
== TGSI_TYPE_SIGNED64
) {
1306 LLVMTypeRef i64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1307 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, i64ptr_type
, "");
1308 bld_broad
= &bld_base
->int64_bld
;
1310 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1311 res
= lp_build_broadcast_scalar(bld_broad
, scalar
);
1316 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| stype
== TGSI_TYPE_DOUBLE
|| stype
== TGSI_TYPE_SIGNED64
|| stype
== TGSI_TYPE_UNSIGNED64
) {
1317 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1318 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1325 * Fetch 64-bit values from two separate channels.
1326 * 64-bit values are stored split across two channels, like xy and zw.
1327 * This function creates a set of vec_length*2 floats,
1328 * extracts the values from the two channels,
1329 * puts them in the correct place, then casts to vec_length 64-bits.
1333 struct lp_build_tgsi_context
* bld_base
,
1334 enum tgsi_opcode_type stype
,
1336 LLVMValueRef input2
)
1338 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1339 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1340 LLVMBuilderRef builder
= gallivm
->builder
;
1342 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1344 LLVMValueRef shuffles
[2 * (LP_MAX_VECTOR_WIDTH
/32)];
1345 int len
= bld_base
->base
.type
.length
* 2;
1346 assert(len
<= (2 * (LP_MAX_VECTOR_WIDTH
/32)));
1348 for (i
= 0; i
< bld_base
->base
.type
.length
* 2; i
+=2) {
1349 shuffles
[i
] = lp_build_const_int32(gallivm
, i
/ 2);
1350 shuffles
[i
+ 1] = lp_build_const_int32(gallivm
, i
/ 2 + bld_base
->base
.type
.length
);
1352 res
= LLVMBuildShuffleVector(builder
, input
, input2
, LLVMConstVector(shuffles
, len
), "");
1354 return LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1358 emit_fetch_immediate(
1359 struct lp_build_tgsi_context
* bld_base
,
1360 const struct tgsi_full_src_register
* reg
,
1361 enum tgsi_opcode_type stype
,
1362 unsigned swizzle_in
)
1364 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1365 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1366 LLVMBuilderRef builder
= gallivm
->builder
;
1367 LLVMValueRef res
= NULL
;
1368 unsigned swizzle
= swizzle_in
& 0xffff;
1370 if (bld
->use_immediates_array
|| reg
->Register
.Indirect
) {
1371 LLVMValueRef imms_array
;
1372 LLVMTypeRef fptr_type
;
1374 /* cast imms_array pointer to float* */
1375 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1376 imms_array
= LLVMBuildBitCast(builder
, bld
->imms_array
, fptr_type
, "");
1378 if (reg
->Register
.Indirect
) {
1379 LLVMValueRef indirect_index
;
1380 LLVMValueRef index_vec
; /* index into the immediate register array */
1381 LLVMValueRef index_vec2
= NULL
;
1382 indirect_index
= get_indirect_index(bld
,
1384 reg
->Register
.Index
,
1386 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1388 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1389 * immediates are stored as full vectors (FIXME??? - might be better
1390 * to store them the same as constants) but all elements are the same
1393 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1397 if (tgsi_type_is_64bit(stype
))
1398 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1402 /* Gather values from the immediate register array */
1403 res
= build_gather(bld_base
, imms_array
, index_vec
, NULL
, index_vec2
);
1405 LLVMValueRef gep
[2];
1406 gep
[0] = lp_build_const_int32(gallivm
, 0);
1407 gep
[1] = lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1408 LLVMValueRef imms_ptr
= LLVMBuildGEP(builder
,
1409 bld
->imms_array
, gep
, 2, "");
1410 res
= LLVMBuildLoad(builder
, imms_ptr
, "");
1412 if (tgsi_type_is_64bit(stype
)) {
1413 LLVMValueRef imms_ptr2
;
1415 gep
[1] = lp_build_const_int32(gallivm
,
1416 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1417 imms_ptr2
= LLVMBuildGEP(builder
,
1418 bld
->imms_array
, gep
, 2, "");
1419 res2
= LLVMBuildLoad(builder
, imms_ptr2
, "");
1420 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1425 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
1426 if (tgsi_type_is_64bit(stype
))
1427 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->immediates
[reg
->Register
.Index
][swizzle_in
>> 16]);
1430 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1431 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1432 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1439 struct lp_build_tgsi_context
* bld_base
,
1440 const struct tgsi_full_src_register
* reg
,
1441 enum tgsi_opcode_type stype
,
1442 unsigned swizzle_in
)
1444 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1445 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1446 LLVMBuilderRef builder
= gallivm
->builder
;
1448 unsigned swizzle
= swizzle_in
& 0xffff;
1450 if (reg
->Register
.Indirect
) {
1451 LLVMValueRef indirect_index
;
1452 LLVMValueRef index_vec
; /* index into the input reg array */
1453 LLVMValueRef index_vec2
= NULL
;
1454 LLVMValueRef inputs_array
;
1455 LLVMTypeRef fptr_type
;
1457 indirect_index
= get_indirect_index(bld
,
1459 reg
->Register
.Index
,
1461 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1463 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1467 if (tgsi_type_is_64bit(stype
)) {
1468 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1473 /* cast inputs_array pointer to float* */
1474 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1475 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
, fptr_type
, "");
1477 /* Gather values from the input register array */
1478 res
= build_gather(bld_base
, inputs_array
, index_vec
, NULL
, index_vec2
);
1480 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
1481 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1482 reg
->Register
.Index
* 4 + swizzle
);
1483 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
1484 bld
->inputs_array
, &lindex
, 1, "");
1486 res
= LLVMBuildLoad(builder
, input_ptr
, "");
1487 if (tgsi_type_is_64bit(stype
)) {
1488 LLVMValueRef lindex1
;
1489 LLVMValueRef input_ptr2
;
1492 lindex1
= lp_build_const_int32(gallivm
,
1493 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1494 input_ptr2
= LLVMBuildGEP(builder
,
1495 bld
->inputs_array
, &lindex1
, 1, "");
1496 res2
= LLVMBuildLoad(builder
, input_ptr2
, "");
1497 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1501 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
1502 if (tgsi_type_is_64bit(stype
))
1503 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->inputs
[reg
->Register
.Index
][swizzle_in
>> 16]);
1509 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1510 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1511 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1519 emit_fetch_gs_input(
1520 struct lp_build_tgsi_context
* bld_base
,
1521 const struct tgsi_full_src_register
* reg
,
1522 enum tgsi_opcode_type stype
,
1523 unsigned swizzle_in
)
1525 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1526 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1527 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1528 LLVMBuilderRef builder
= gallivm
->builder
;
1529 LLVMValueRef attrib_index
= NULL
;
1530 LLVMValueRef vertex_index
= NULL
;
1531 unsigned swizzle
= swizzle_in
& 0xffff;
1532 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
);
1535 if (info
->input_semantic_name
[reg
->Register
.Index
] == TGSI_SEMANTIC_PRIMID
) {
1536 /* This is really a system value not a regular input */
1537 assert(!reg
->Register
.Indirect
);
1538 assert(!reg
->Dimension
.Indirect
);
1539 res
= bld
->system_values
.prim_id
;
1540 if (stype
!= TGSI_TYPE_UNSIGNED
&& stype
!= TGSI_TYPE_SIGNED
) {
1541 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1546 if (reg
->Register
.Indirect
) {
1548 * XXX: this is possibly not quite the right value, since file_max may be
1549 * larger than the max attrib index, due to it being the max of declared
1550 * inputs AND the max vertices per prim (which is 6 for tri adj).
1551 * It should however be safe to use (since we always allocate
1552 * PIPE_MAX_SHADER_INPUTS (80) for it, which is overallocated quite a bit).
1554 int index_limit
= info
->file_max
[reg
->Register
.File
];
1555 attrib_index
= get_indirect_index(bld
,
1557 reg
->Register
.Index
,
1561 attrib_index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
);
1564 if (reg
->Dimension
.Indirect
) {
1566 * A fixed 6 should do as well (which is what we allocate).
1568 int index_limit
= u_vertices_per_prim(info
->properties
[TGSI_PROPERTY_GS_INPUT_PRIM
]);
1569 vertex_index
= get_indirect_index(bld
,
1571 reg
->Dimension
.Index
,
1575 vertex_index
= lp_build_const_int32(gallivm
, reg
->Dimension
.Index
);
1578 res
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1579 reg
->Dimension
.Indirect
,
1581 reg
->Register
.Indirect
,
1586 if (tgsi_type_is_64bit(stype
)) {
1587 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle_in
>> 16);
1589 res2
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1590 reg
->Dimension
.Indirect
,
1592 reg
->Register
.Indirect
,
1596 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1597 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1598 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1599 } else if (stype
== TGSI_TYPE_SIGNED
) {
1600 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1607 emit_fetch_temporary(
1608 struct lp_build_tgsi_context
* bld_base
,
1609 const struct tgsi_full_src_register
* reg
,
1610 enum tgsi_opcode_type stype
,
1611 unsigned swizzle_in
)
1613 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1614 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1615 LLVMBuilderRef builder
= gallivm
->builder
;
1617 unsigned swizzle
= swizzle_in
& 0xffff;
1619 if (reg
->Register
.Indirect
) {
1620 LLVMValueRef indirect_index
;
1621 LLVMValueRef index_vec
, index_vec2
= NULL
; /* index into the temp reg array */
1622 LLVMValueRef temps_array
;
1623 LLVMTypeRef fptr_type
;
1625 indirect_index
= get_indirect_index(bld
,
1627 reg
->Register
.Index
,
1629 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1631 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1635 if (tgsi_type_is_64bit(stype
)) {
1636 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1642 /* cast temps_array pointer to float* */
1643 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1644 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1646 /* Gather values from the temporary register array */
1647 res
= build_gather(bld_base
, temps_array
, index_vec
, NULL
, index_vec2
);
1650 LLVMValueRef temp_ptr
;
1651 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
1652 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1654 if (tgsi_type_is_64bit(stype
)) {
1655 LLVMValueRef temp_ptr2
, res2
;
1657 temp_ptr2
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle_in
>> 16);
1658 res2
= LLVMBuildLoad(builder
, temp_ptr2
, "");
1659 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1663 if (stype
== TGSI_TYPE_SIGNED
||
1664 stype
== TGSI_TYPE_UNSIGNED
||
1665 stype
== TGSI_TYPE_DOUBLE
||
1666 stype
== TGSI_TYPE_SIGNED64
||
1667 stype
== TGSI_TYPE_UNSIGNED64
) {
1668 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1669 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1676 emit_fetch_system_value(
1677 struct lp_build_tgsi_context
* bld_base
,
1678 const struct tgsi_full_src_register
* reg
,
1679 enum tgsi_opcode_type stype
,
1680 unsigned swizzle_in
)
1682 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1683 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1684 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1685 LLVMBuilderRef builder
= gallivm
->builder
;
1687 enum tgsi_opcode_type atype
; // Actual type of the value
1689 assert(!reg
->Register
.Indirect
);
1691 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
1692 case TGSI_SEMANTIC_INSTANCEID
:
1693 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
1694 atype
= TGSI_TYPE_UNSIGNED
;
1697 case TGSI_SEMANTIC_VERTEXID
:
1698 res
= bld
->system_values
.vertex_id
;
1699 atype
= TGSI_TYPE_UNSIGNED
;
1702 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
1703 res
= bld
->system_values
.vertex_id_nobase
;
1704 atype
= TGSI_TYPE_UNSIGNED
;
1707 case TGSI_SEMANTIC_BASEVERTEX
:
1708 res
= bld
->system_values
.basevertex
;
1709 atype
= TGSI_TYPE_UNSIGNED
;
1712 case TGSI_SEMANTIC_PRIMID
:
1713 res
= bld
->system_values
.prim_id
;
1714 atype
= TGSI_TYPE_UNSIGNED
;
1717 case TGSI_SEMANTIC_INVOCATIONID
:
1718 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.invocation_id
);
1719 atype
= TGSI_TYPE_UNSIGNED
;
1723 assert(!"unexpected semantic in emit_fetch_system_value");
1724 res
= bld_base
->base
.zero
;
1725 atype
= TGSI_TYPE_FLOAT
;
1729 if (atype
!= stype
) {
1730 if (stype
== TGSI_TYPE_FLOAT
) {
1731 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1732 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1733 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1734 } else if (stype
== TGSI_TYPE_SIGNED
) {
1735 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1743 * Register fetch with derivatives.
1747 struct lp_build_tgsi_soa_context
*bld
,
1756 /* TODO: use interpolation coeffs for inputs */
1759 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
1762 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
1766 * store an array of vec-length 64-bit into two arrays of vec_length floats
1768 * value is d0, d1, d2, d3 etc.
1769 * each 64-bit has high and low pieces x, y
1770 * so gets stored into the separate channels as:
1771 * chan_ptr = d0.x, d1.x, d2.x, d3.x
1772 * chan_ptr2 = d0.y, d1.y, d2.y, d3.y
1775 emit_store_64bit_chan(struct lp_build_tgsi_context
*bld_base
,
1776 LLVMValueRef chan_ptr
, LLVMValueRef chan_ptr2
,
1779 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1780 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1781 LLVMBuilderRef builder
= gallivm
->builder
;
1782 struct lp_build_context
*float_bld
= &bld_base
->base
;
1784 LLVMValueRef temp
, temp2
;
1785 LLVMValueRef shuffles
[LP_MAX_VECTOR_WIDTH
/32];
1786 LLVMValueRef shuffles2
[LP_MAX_VECTOR_WIDTH
/32];
1788 for (i
= 0; i
< bld_base
->base
.type
.length
; i
++) {
1789 shuffles
[i
] = lp_build_const_int32(gallivm
, i
* 2);
1790 shuffles2
[i
] = lp_build_const_int32(gallivm
, (i
* 2) + 1);
1793 temp
= LLVMBuildShuffleVector(builder
, value
,
1794 LLVMGetUndef(LLVMTypeOf(value
)),
1795 LLVMConstVector(shuffles
,
1796 bld_base
->base
.type
.length
),
1798 temp2
= LLVMBuildShuffleVector(builder
, value
,
1799 LLVMGetUndef(LLVMTypeOf(value
)),
1800 LLVMConstVector(shuffles2
,
1801 bld_base
->base
.type
.length
),
1804 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp
, chan_ptr
);
1805 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp2
, chan_ptr2
);
1813 struct lp_build_tgsi_context
*bld_base
,
1814 const struct tgsi_full_instruction
*inst
,
1816 unsigned chan_index
,
1819 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1820 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1821 LLVMBuilderRef builder
= gallivm
->builder
;
1822 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
1823 struct lp_build_context
*float_bld
= &bld_base
->base
;
1824 struct lp_build_context
*int_bld
= &bld_base
->int_bld
;
1825 LLVMValueRef indirect_index
= NULL
;
1826 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
1831 * It is always assumed to be float.
1833 if (inst
->Instruction
.Saturate
) {
1834 assert(dtype
== TGSI_TYPE_FLOAT
||
1835 dtype
== TGSI_TYPE_UNTYPED
);
1836 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1837 value
= lp_build_clamp_zero_one_nanzero(float_bld
, value
);
1840 if (reg
->Register
.Indirect
) {
1842 * Currently the mesa/st doesn't generate indirect stores
1843 * to 64-bit values, it normally uses MOV to do indirect stores.
1845 assert(!tgsi_type_is_64bit(dtype
));
1846 indirect_index
= get_indirect_index(bld
,
1848 reg
->Register
.Index
,
1850 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1852 assert(reg
->Register
.Index
<=
1853 bld_base
->info
->file_max
[reg
->Register
.File
]);
1856 if (DEBUG_EXECUTION
) {
1857 emit_dump_reg(gallivm
, reg
->Register
.File
, reg
->Register
.Index
, chan_index
, value
);
1860 switch( reg
->Register
.File
) {
1861 case TGSI_FILE_OUTPUT
:
1862 /* Outputs are always stored as floats */
1863 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1865 if (reg
->Register
.Indirect
) {
1866 LLVMValueRef index_vec
; /* indexes into the output registers */
1867 LLVMValueRef outputs_array
;
1868 LLVMTypeRef fptr_type
;
1870 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1875 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1876 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
, fptr_type
, "");
1878 /* Scatter store values into output registers */
1879 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1883 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1886 if (tgsi_type_is_64bit(dtype
)) {
1887 LLVMValueRef out_ptr2
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1889 emit_store_64bit_chan(bld_base
, out_ptr
, out_ptr2
,
1892 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, out_ptr
);
1896 case TGSI_FILE_TEMPORARY
:
1897 /* Temporaries are always stored as floats */
1898 if (!tgsi_type_is_64bit(dtype
))
1899 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1901 value
= LLVMBuildBitCast(builder
, value
, LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2), "");
1903 if (reg
->Register
.Indirect
) {
1904 LLVMValueRef index_vec
; /* indexes into the temp registers */
1905 LLVMValueRef temps_array
;
1906 LLVMTypeRef fptr_type
;
1908 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1913 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1914 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1916 /* Scatter store values into temp registers */
1917 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1921 LLVMValueRef temp_ptr
;
1922 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, chan_index
);
1924 if (tgsi_type_is_64bit(dtype
)) {
1925 LLVMValueRef temp_ptr2
= lp_get_temp_ptr_soa(bld
,
1926 reg
->Register
.Index
,
1928 emit_store_64bit_chan(bld_base
, temp_ptr
, temp_ptr2
,
1932 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, temp_ptr
);
1936 case TGSI_FILE_ADDRESS
:
1937 assert(dtype
== TGSI_TYPE_SIGNED
);
1938 assert(LLVMTypeOf(value
) == int_bld
->vec_type
);
1939 value
= LLVMBuildBitCast(builder
, value
, int_bld
->vec_type
, "");
1940 lp_exec_mask_store(&bld
->exec_mask
, int_bld
, value
,
1941 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1952 * Called at the beginning of the translation of each TGSI instruction, to
1953 * emit some debug code.
1957 struct lp_build_tgsi_context
* bld_base
,
1958 const struct tgsi_full_instruction
* inst
,
1959 const struct tgsi_opcode_info
* info
)
1962 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1964 if (DEBUG_EXECUTION
) {
1966 * Dump the TGSI instruction.
1969 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1973 tgsi_dump_instruction_str(inst
, bld_base
->pc
, &buf
[2], sizeof buf
- 2);
1974 lp_build_printf(gallivm
, buf
);
1976 /* Dump the execution mask.
1978 if (bld
->exec_mask
.has_mask
) {
1979 lp_build_print_value(gallivm
, " mask = ", bld
->exec_mask
.exec_mask
);
1986 struct lp_build_tgsi_context
* bld_base
,
1987 const struct tgsi_full_instruction
* inst
,
1988 const struct tgsi_opcode_info
* info
,
1990 LLVMValueRef dst
[4])
1993 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
1995 unsigned writemask
= inst
->Dst
[index
].Register
.WriteMask
;
1997 unsigned chan_index
= u_bit_scan(&writemask
);
1998 if (tgsi_type_is_64bit(dtype
) && (chan_index
== 1 || chan_index
== 3))
2000 emit_store_chan(bld_base
, inst
, index
, chan_index
, dst
[chan_index
]);
2005 tgsi_to_pipe_tex_target(unsigned tgsi_target
)
2007 switch (tgsi_target
) {
2008 case TGSI_TEXTURE_BUFFER
:
2010 case TGSI_TEXTURE_1D
:
2011 case TGSI_TEXTURE_SHADOW1D
:
2012 return PIPE_TEXTURE_1D
;
2013 case TGSI_TEXTURE_2D
:
2014 case TGSI_TEXTURE_SHADOW2D
:
2015 case TGSI_TEXTURE_2D_MSAA
:
2016 return PIPE_TEXTURE_2D
;
2017 case TGSI_TEXTURE_3D
:
2018 return PIPE_TEXTURE_3D
;
2019 case TGSI_TEXTURE_CUBE
:
2020 case TGSI_TEXTURE_SHADOWCUBE
:
2021 return PIPE_TEXTURE_CUBE
;
2022 case TGSI_TEXTURE_RECT
:
2023 case TGSI_TEXTURE_SHADOWRECT
:
2024 return PIPE_TEXTURE_RECT
;
2025 case TGSI_TEXTURE_1D_ARRAY
:
2026 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2027 return PIPE_TEXTURE_1D_ARRAY
;
2028 case TGSI_TEXTURE_2D_ARRAY
:
2029 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2030 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2031 return PIPE_TEXTURE_2D_ARRAY
;
2032 case TGSI_TEXTURE_CUBE_ARRAY
:
2033 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2034 return PIPE_TEXTURE_CUBE_ARRAY
;
2042 static enum lp_sampler_lod_property
2043 lp_build_lod_property(
2044 struct lp_build_tgsi_context
*bld_base
,
2045 const struct tgsi_full_instruction
*inst
,
2048 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
2049 enum lp_sampler_lod_property lod_property
;
2052 * Not much we can do here. We could try catching inputs declared
2053 * with constant interpolation but not sure it's worth it - since for
2054 * TEX opcodes as well as FETCH/LD the lod comes from same reg as
2055 * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO), just
2056 * like the constant/immediate recognition below.
2057 * What seems to be of more value would be to recognize temps holding
2058 * broadcasted scalars but no way we can do it.
2059 * Tried asking llvm but without any success (using LLVMIsConstant
2060 * even though this isn't exactly what we'd need), even as simple as
2061 * IMM[0] UINT32 (0,-1,0,0)
2062 * MOV TEMP[0] IMM[0].yyyy
2063 * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
2065 * This means there's ZERO chance this will ever catch a scalar lod
2066 * with traditional tex opcodes as well as texel fetches, since the lod
2067 * comes from the same reg as coords (except some test shaders using
2068 * constant coords maybe).
2069 * There's at least hope for sample opcodes as well as size queries.
2071 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
||
2072 reg
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
2073 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2075 else if (bld_base
->info
->processor
== PIPE_SHADER_FRAGMENT
) {
2076 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2077 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2080 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2084 /* never use scalar (per-quad) lod the results are just too wrong. */
2085 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2087 return lod_property
;
2092 * High-level instruction translators.
2096 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
2097 const struct tgsi_full_instruction
*inst
,
2098 enum lp_build_tex_modifier modifier
,
2099 LLVMValueRef
*texel
,
2100 unsigned sampler_reg
,
2101 enum lp_sampler_op_type sampler_op
)
2103 unsigned unit
= inst
->Src
[sampler_reg
].Register
.Index
;
2104 LLVMValueRef oow
= NULL
;
2105 LLVMValueRef lod
= NULL
;
2106 LLVMValueRef coords
[5];
2107 LLVMValueRef offsets
[3] = { NULL
};
2108 struct lp_derivatives derivs
;
2109 struct lp_sampler_params params
;
2110 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2111 unsigned num_derivs
, num_offsets
, i
;
2112 unsigned shadow_coord
= 0;
2113 unsigned layer_coord
= 0;
2114 unsigned sample_key
= sampler_op
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2116 memset(¶ms
, 0, sizeof(params
));
2118 if (!bld
->sampler
) {
2119 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2120 for (i
= 0; i
< 4; i
++) {
2121 texel
[i
] = bld
->bld_base
.base
.undef
;
2126 switch (inst
->Texture
.Texture
) {
2127 case TGSI_TEXTURE_1D_ARRAY
:
2130 case TGSI_TEXTURE_1D
:
2134 case TGSI_TEXTURE_2D_ARRAY
:
2137 case TGSI_TEXTURE_2D
:
2138 case TGSI_TEXTURE_RECT
:
2142 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2145 case TGSI_TEXTURE_SHADOW1D
:
2150 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2156 case TGSI_TEXTURE_SHADOW2D
:
2157 case TGSI_TEXTURE_SHADOWRECT
:
2162 case TGSI_TEXTURE_CUBE
:
2166 case TGSI_TEXTURE_3D
:
2170 case TGSI_TEXTURE_SHADOWCUBE
:
2175 case TGSI_TEXTURE_CUBE_ARRAY
:
2180 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2184 shadow_coord
= 4; /* shadow coord special different reg */
2186 case TGSI_TEXTURE_2D_MSAA
:
2187 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2193 /* Note lod and especially projected are illegal in a LOT of cases */
2194 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2195 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2196 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
2197 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
) {
2198 /* note that shadow cube array with bias/explicit lod does not exist */
2199 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2202 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2204 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2205 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2207 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2208 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2210 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2213 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
2214 oow
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2215 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
2218 for (i
= 0; i
< num_derivs
; i
++) {
2219 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2220 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2221 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
2223 for (i
= num_derivs
; i
< 5; i
++) {
2224 coords
[i
] = bld
->bld_base
.base
.undef
;
2227 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2229 if (layer_coord
== 3) {
2230 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2233 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2235 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2236 coords
[2] = lp_build_mul(&bld
->bld_base
.base
, coords
[2], oow
);
2238 /* Shadow coord occupies always 5th slot. */
2240 sample_key
|= LP_SAMPLER_SHADOW
;
2241 if (shadow_coord
== 4) {
2242 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2245 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, shadow_coord
);
2247 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2248 coords
[4] = lp_build_mul(&bld
->bld_base
.base
, coords
[4], oow
);
2251 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2253 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2254 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2255 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, dim
);
2256 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 2, dim
);
2258 params
.derivs
= &derivs
;
2260 * could also check all src regs if constant but I doubt such
2261 * cases exist in practice.
2263 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2264 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2265 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2268 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2272 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2275 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2277 /* we don't handle the 4 offset version of tg4 */
2278 if (inst
->Texture
.NumOffsets
== 1) {
2280 sample_key
|= LP_SAMPLER_OFFSETS
;
2281 for (dim
= 0; dim
< num_offsets
; dim
++) {
2282 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2286 params
.type
= bld
->bld_base
.base
.type
;
2287 params
.sample_key
= sample_key
;
2288 params
.texture_index
= unit
;
2289 params
.sampler_index
= unit
;
2290 params
.context_ptr
= bld
->context_ptr
;
2291 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2292 params
.coords
= coords
;
2293 params
.offsets
= offsets
;
2295 params
.texel
= texel
;
2297 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2298 bld
->bld_base
.base
.gallivm
,
2303 emit_sample(struct lp_build_tgsi_soa_context
*bld
,
2304 const struct tgsi_full_instruction
*inst
,
2305 enum lp_build_tex_modifier modifier
,
2307 enum lp_sampler_op_type sample_type
,
2308 LLVMValueRef
*texel
)
2310 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2311 unsigned texture_unit
, sampler_unit
;
2312 LLVMValueRef lod
= NULL
;
2313 LLVMValueRef coords
[5];
2314 LLVMValueRef offsets
[3] = { NULL
};
2315 struct lp_derivatives derivs
;
2316 struct lp_sampler_params params
;
2317 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2319 unsigned num_offsets
, num_derivs
, i
;
2320 unsigned layer_coord
= 0;
2321 unsigned sample_key
= sample_type
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2323 memset(¶ms
, 0, sizeof(params
));
2325 if (!bld
->sampler
) {
2326 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2327 for (i
= 0; i
< 4; i
++) {
2328 texel
[i
] = bld
->bld_base
.base
.undef
;
2334 * unlike old-style tex opcodes the texture/sampler indices
2335 * always come from src1 and src2 respectively.
2337 texture_unit
= inst
->Src
[1].Register
.Index
;
2338 sampler_unit
= inst
->Src
[2].Register
.Index
;
2341 * Note inst->Texture.Texture will contain the number of offsets,
2342 * however the target information is NOT there and comes from the
2343 * declared sampler views instead.
2345 switch (bld
->sv
[texture_unit
].Resource
) {
2346 case TGSI_TEXTURE_1D
:
2350 case TGSI_TEXTURE_1D_ARRAY
:
2355 case TGSI_TEXTURE_2D
:
2356 case TGSI_TEXTURE_RECT
:
2360 case TGSI_TEXTURE_2D_ARRAY
:
2365 case TGSI_TEXTURE_CUBE
:
2369 case TGSI_TEXTURE_3D
:
2373 case TGSI_TEXTURE_CUBE_ARRAY
:
2383 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2384 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2385 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2386 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2387 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2389 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2390 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2392 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2394 else if (modifier
== LP_BLD_TEX_MODIFIER_LOD_ZERO
) {
2395 /* XXX might be better to explicitly pass the level zero information */
2396 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2397 lod
= lp_build_const_vec(gallivm
, bld
->bld_base
.base
.type
, 0.0F
);
2400 for (i
= 0; i
< num_derivs
; i
++) {
2401 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2403 for (i
= num_derivs
; i
< 5; i
++) {
2404 coords
[i
] = bld
->bld_base
.base
.undef
;
2407 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2409 if (layer_coord
== 3)
2410 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2412 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2414 /* Shadow coord occupies always 5th slot. */
2416 sample_key
|= LP_SAMPLER_SHADOW
;
2417 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2420 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2422 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2423 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2424 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, dim
);
2425 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 4, dim
);
2427 params
.derivs
= &derivs
;
2429 * could also check all src regs if constant but I doubt such
2430 * cases exist in practice.
2432 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2433 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2434 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2437 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2441 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2445 /* some advanced gather instructions (txgo) would require 4 offsets */
2446 if (inst
->Texture
.NumOffsets
== 1) {
2448 sample_key
|= LP_SAMPLER_OFFSETS
;
2449 for (dim
= 0; dim
< num_offsets
; dim
++) {
2450 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2453 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2455 params
.type
= bld
->bld_base
.base
.type
;
2456 params
.sample_key
= sample_key
;
2457 params
.texture_index
= texture_unit
;
2458 params
.sampler_index
= sampler_unit
;
2459 params
.context_ptr
= bld
->context_ptr
;
2460 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2461 params
.coords
= coords
;
2462 params
.offsets
= offsets
;
2464 params
.texel
= texel
;
2466 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2467 bld
->bld_base
.base
.gallivm
,
2470 if (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2471 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2472 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2473 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
) {
2474 unsigned char swizzles
[4];
2475 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2476 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2477 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2478 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2480 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2485 emit_fetch_texels( struct lp_build_tgsi_soa_context
*bld
,
2486 const struct tgsi_full_instruction
*inst
,
2487 LLVMValueRef
*texel
,
2490 unsigned unit
, target
;
2491 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
2492 LLVMValueRef explicit_lod
= NULL
;
2493 LLVMValueRef coords
[5];
2494 LLVMValueRef offsets
[3] = { NULL
};
2495 struct lp_sampler_params params
;
2496 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2498 unsigned layer_coord
= 0;
2499 unsigned sample_key
= LP_SAMPLER_OP_FETCH
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2501 memset(¶ms
, 0, sizeof(params
));
2503 if (!bld
->sampler
) {
2504 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2505 for (i
= 0; i
< 4; i
++) {
2506 texel
[i
] = coord_undef
;
2511 unit
= inst
->Src
[1].Register
.Index
;
2514 target
= bld
->sv
[unit
].Resource
;
2517 target
= inst
->Texture
.Texture
;
2521 case TGSI_TEXTURE_1D
:
2522 case TGSI_TEXTURE_BUFFER
:
2525 case TGSI_TEXTURE_1D_ARRAY
:
2529 case TGSI_TEXTURE_2D
:
2530 case TGSI_TEXTURE_RECT
:
2531 case TGSI_TEXTURE_2D_MSAA
:
2534 case TGSI_TEXTURE_2D_ARRAY
:
2535 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2539 case TGSI_TEXTURE_3D
:
2547 /* always have lod except for buffers and msaa targets ? */
2548 if (target
!= TGSI_TEXTURE_BUFFER
&&
2549 target
!= TGSI_TEXTURE_2D_MSAA
&&
2550 target
!= TGSI_TEXTURE_2D_ARRAY_MSAA
) {
2551 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2552 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2553 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2556 * XXX: for real msaa support, the w component (or src2.x for sample_i_ms)
2557 * would be the sample index.
2560 for (i
= 0; i
< dims
; i
++) {
2561 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2563 /* never use more than 3 coords here but emit_fetch_texel copies all 5 anyway */
2564 for (i
= dims
; i
< 5; i
++) {
2565 coords
[i
] = coord_undef
;
2568 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2570 if (inst
->Texture
.NumOffsets
== 1) {
2572 sample_key
|= LP_SAMPLER_OFFSETS
;
2573 for (dim
= 0; dim
< dims
; dim
++) {
2574 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2577 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2579 params
.type
= bld
->bld_base
.base
.type
;
2580 params
.sample_key
= sample_key
;
2581 params
.texture_index
= unit
;
2583 * sampler not actually used, set to 0 so it won't exceed PIPE_MAX_SAMPLERS
2584 * and trigger some assertions with d3d10 where the sampler view number
2587 params
.sampler_index
= 0;
2588 params
.context_ptr
= bld
->context_ptr
;
2589 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2590 params
.coords
= coords
;
2591 params
.offsets
= offsets
;
2592 params
.derivs
= NULL
;
2593 params
.lod
= explicit_lod
;
2594 params
.texel
= texel
;
2596 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2597 bld
->bld_base
.base
.gallivm
,
2601 (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2602 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2603 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2604 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
)) {
2605 unsigned char swizzles
[4];
2606 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2607 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2608 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2609 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2611 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2616 emit_size_query( struct lp_build_tgsi_soa_context
*bld
,
2617 const struct tgsi_full_instruction
*inst
,
2618 LLVMValueRef
*sizes_out
,
2619 boolean is_sviewinfo
)
2621 LLVMValueRef explicit_lod
;
2622 enum lp_sampler_lod_property lod_property
;
2625 unsigned unit
= inst
->Src
[1].Register
.Index
;
2626 unsigned target
, pipe_target
;
2627 struct lp_sampler_size_query_params params
;
2630 target
= bld
->sv
[unit
].Resource
;
2633 target
= inst
->Texture
.Texture
;
2636 case TGSI_TEXTURE_BUFFER
:
2637 case TGSI_TEXTURE_RECT
:
2638 case TGSI_TEXTURE_SHADOWRECT
:
2646 if (!bld
->sampler
) {
2647 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
2648 for (i
= 0; i
< 4; i
++)
2649 sizes_out
[i
] = bld
->bld_base
.int_bld
.undef
;
2654 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 0);
2655 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2658 explicit_lod
= NULL
;
2659 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2663 pipe_target
= tgsi_to_pipe_tex_target(target
);
2665 params
.int_type
= bld
->bld_base
.int_bld
.type
;
2666 params
.texture_unit
= unit
;
2667 params
.target
= pipe_target
;
2668 params
.context_ptr
= bld
->context_ptr
;
2669 params
.is_sviewinfo
= TRUE
;
2670 params
.lod_property
= lod_property
;
2671 params
.explicit_lod
= explicit_lod
;
2672 params
.sizes_out
= sizes_out
;
2674 bld
->sampler
->emit_size_query(bld
->sampler
,
2675 bld
->bld_base
.base
.gallivm
,
2680 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
2685 for (i
= 0; i
< 5; i
++) {
2686 enum tgsi_opcode opcode
;
2688 if (pc
+ i
>= bld
->bld_base
.info
->num_instructions
)
2691 opcode
= bld
->bld_base
.instructions
[pc
+ i
].Instruction
.Opcode
;
2693 if (opcode
== TGSI_OPCODE_END
)
2696 if (opcode
== TGSI_OPCODE_TEX
||
2697 opcode
== TGSI_OPCODE_TXP
||
2698 opcode
== TGSI_OPCODE_TXD
||
2699 opcode
== TGSI_OPCODE_TXB
||
2700 opcode
== TGSI_OPCODE_TXL
||
2701 opcode
== TGSI_OPCODE_TXF
||
2702 opcode
== TGSI_OPCODE_TXQ
||
2703 opcode
== TGSI_OPCODE_TEX2
||
2704 opcode
== TGSI_OPCODE_TXB2
||
2705 opcode
== TGSI_OPCODE_TXL2
||
2706 opcode
== TGSI_OPCODE_SAMPLE
||
2707 opcode
== TGSI_OPCODE_SAMPLE_B
||
2708 opcode
== TGSI_OPCODE_SAMPLE_C
||
2709 opcode
== TGSI_OPCODE_SAMPLE_C_LZ
||
2710 opcode
== TGSI_OPCODE_SAMPLE_D
||
2711 opcode
== TGSI_OPCODE_SAMPLE_I
||
2712 opcode
== TGSI_OPCODE_SAMPLE_I_MS
||
2713 opcode
== TGSI_OPCODE_SAMPLE_L
||
2714 opcode
== TGSI_OPCODE_SVIEWINFO
||
2715 opcode
== TGSI_OPCODE_CAL
||
2716 opcode
== TGSI_OPCODE_IF
||
2717 opcode
== TGSI_OPCODE_UIF
||
2718 opcode
== TGSI_OPCODE_BGNLOOP
||
2719 opcode
== TGSI_OPCODE_SWITCH
)
2729 * Kill fragment if any of the src register values are negative.
2733 struct lp_build_tgsi_soa_context
*bld
,
2734 const struct tgsi_full_instruction
*inst
,
2737 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2738 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
2739 LLVMValueRef terms
[TGSI_NUM_CHANNELS
];
2741 unsigned chan_index
;
2743 memset(&terms
, 0, sizeof terms
);
2745 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2748 /* Unswizzle channel */
2749 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
2751 /* Check if the component has not been already tested. */
2752 assert(swizzle
< TGSI_NUM_CHANNELS
);
2753 if( !terms
[swizzle
] )
2754 /* TODO: change the comparison operator instead of setting the sign */
2755 terms
[swizzle
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, chan_index
);
2759 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2760 if(terms
[chan_index
]) {
2761 LLVMValueRef chan_mask
;
2764 * If term < 0 then mask = 0 else mask = ~0.
2766 chan_mask
= lp_build_cmp(&bld
->bld_base
.base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->bld_base
.base
.zero
);
2769 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
2775 if (bld
->exec_mask
.has_mask
) {
2776 LLVMValueRef invmask
;
2777 invmask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2778 mask
= LLVMBuildOr(builder
, mask
, invmask
, "");
2781 lp_build_mask_update(bld
->mask
, mask
);
2782 if (!near_end_of_shader(bld
, pc
))
2783 lp_build_mask_check(bld
->mask
);
2788 * Unconditional fragment kill.
2789 * The only predication is the execution mask which will apply if
2790 * we're inside a loop or conditional.
2793 emit_kill(struct lp_build_tgsi_soa_context
*bld
,
2796 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2799 /* For those channels which are "alive", disable fragment shader
2802 if (bld
->exec_mask
.has_mask
) {
2803 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2806 LLVMValueRef zero
= LLVMConstNull(bld
->bld_base
.base
.int_vec_type
);
2810 lp_build_mask_update(bld
->mask
, mask
);
2812 if (!near_end_of_shader(bld
, pc
))
2813 lp_build_mask_check(bld
->mask
);
2818 * Emit code which will dump the value of all the temporary registers
2822 emit_dump_file(struct lp_build_tgsi_soa_context
*bld
,
2825 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
2826 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2827 LLVMBuilderRef builder
= gallivm
->builder
;
2828 LLVMValueRef reg_ptr
;
2830 int max_index
= info
->file_max
[file
];
2833 * Some register files, particularly constants, can be very large,
2834 * and dumping everything could make this unusably slow.
2836 max_index
= MIN2(max_index
, 32);
2838 for (index
= 0; index
<= max_index
; index
++) {
2843 if (index
< 8 * sizeof(unsigned) &&
2844 (info
->file_mask
[file
] & (1u << index
)) == 0) {
2845 /* This was not declared.*/
2849 if (file
== TGSI_FILE_INPUT
) {
2850 mask
= info
->input_usage_mask
[index
];
2852 mask
= TGSI_WRITEMASK_XYZW
;
2855 for (chan
= 0; chan
< 4; chan
++) {
2856 if ((mask
& (1 << chan
)) == 0) {
2857 /* This channel is not used.*/
2861 if (file
== TGSI_FILE_CONSTANT
) {
2862 struct tgsi_full_src_register reg
;
2863 memset(®
, 0, sizeof reg
);
2864 reg
.Register
.File
= file
;
2865 reg
.Register
.Index
= index
;
2866 reg
.Register
.SwizzleX
= 0;
2867 reg
.Register
.SwizzleY
= 1;
2868 reg
.Register
.SwizzleZ
= 2;
2869 reg
.Register
.SwizzleW
= 3;
2871 res
= bld
->bld_base
.emit_fetch_funcs
[file
](&bld
->bld_base
, ®
, TGSI_TYPE_FLOAT
, chan
);
2875 } else if (file
== TGSI_FILE_INPUT
) {
2876 res
= bld
->inputs
[index
][chan
];
2880 } else if (file
== TGSI_FILE_TEMPORARY
) {
2881 reg_ptr
= lp_get_temp_ptr_soa(bld
, index
, chan
);
2883 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2884 } else if (file
== TGSI_FILE_OUTPUT
) {
2885 reg_ptr
= lp_get_output_ptr(bld
, index
, chan
);
2887 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2893 emit_dump_reg(gallivm
, file
, index
, chan
, res
);
2901 lp_emit_declaration_soa(
2902 struct lp_build_tgsi_context
*bld_base
,
2903 const struct tgsi_full_declaration
*decl
)
2905 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2906 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2907 LLVMTypeRef vec_type
= bld
->bld_base
.base
.vec_type
;
2908 const unsigned first
= decl
->Range
.First
;
2909 const unsigned last
= decl
->Range
.Last
;
2912 assert(last
<= bld
->bld_base
.info
->file_max
[decl
->Declaration
.File
]);
2914 switch (decl
->Declaration
.File
) {
2915 case TGSI_FILE_TEMPORARY
:
2916 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
2917 assert(last
< LP_MAX_INLINED_TEMPS
);
2918 for (idx
= first
; idx
<= last
; ++idx
) {
2919 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2920 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
2925 case TGSI_FILE_OUTPUT
:
2926 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
2927 for (idx
= first
; idx
<= last
; ++idx
) {
2928 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2929 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
2930 vec_type
, "output");
2935 case TGSI_FILE_ADDRESS
:
2936 /* ADDR registers are only allocated with an integer LLVM IR type,
2937 * as they are guaranteed to always have integers.
2938 * XXX: Not sure if this exception is worthwhile (or the whole idea of
2939 * an ADDR register for that matter).
2941 assert(last
< LP_MAX_TGSI_ADDRS
);
2942 for (idx
= first
; idx
<= last
; ++idx
) {
2943 assert(idx
< LP_MAX_TGSI_ADDRS
);
2944 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2945 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, bld_base
->base
.int_vec_type
, "addr");
2949 case TGSI_FILE_SAMPLER_VIEW
:
2951 * The target stored here MUST match whatever there actually
2952 * is in the set sampler views (what about return type?).
2954 assert(last
< PIPE_MAX_SHADER_SAMPLER_VIEWS
);
2955 for (idx
= first
; idx
<= last
; ++idx
) {
2956 bld
->sv
[idx
] = decl
->SamplerView
;
2960 case TGSI_FILE_CONSTANT
:
2963 * We could trivially fetch the per-buffer pointer when fetching the
2964 * constant, relying on llvm to figure out it's always the same pointer
2965 * anyway. However, doing so results in a huge (more than factor of 10)
2966 * slowdown in llvm compilation times for some (but not all) shaders
2967 * (more specifically, the IR optimization spends way more time in
2968 * DominatorTree::dominates). At least with llvm versions 3.1, 3.3.
2970 unsigned idx2D
= decl
->Dim
.Index2D
;
2971 LLVMValueRef index2D
= lp_build_const_int32(gallivm
, idx2D
);
2972 assert(idx2D
< LP_MAX_TGSI_CONST_BUFFERS
);
2973 bld
->consts
[idx2D
] =
2974 lp_build_array_get(gallivm
, bld
->consts_ptr
, index2D
);
2975 bld
->consts_sizes
[idx2D
] =
2976 lp_build_array_get(gallivm
, bld
->const_sizes_ptr
, index2D
);
2981 /* don't need to declare other vars */
2987 void lp_emit_immediate_soa(
2988 struct lp_build_tgsi_context
*bld_base
,
2989 const struct tgsi_full_immediate
*imm
)
2991 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2992 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
2993 LLVMValueRef imms
[4];
2995 const uint size
= imm
->Immediate
.NrTokens
- 1;
2997 switch (imm
->Immediate
.DataType
) {
2998 case TGSI_IMM_FLOAT32
:
2999 for( i
= 0; i
< size
; ++i
)
3001 lp_build_const_vec(gallivm
, bld_base
->base
.type
, imm
->u
[i
].Float
);
3004 case TGSI_IMM_FLOAT64
:
3005 case TGSI_IMM_UINT64
:
3006 case TGSI_IMM_INT64
:
3007 case TGSI_IMM_UINT32
:
3008 for( i
= 0; i
< size
; ++i
) {
3009 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->uint_bld
.type
, imm
->u
[i
].Uint
);
3010 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
3014 case TGSI_IMM_INT32
:
3015 for( i
= 0; i
< size
; ++i
) {
3016 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->int_bld
.type
, imm
->u
[i
].Int
);
3017 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
3022 for( i
= size
; i
< 4; ++i
)
3023 imms
[i
] = bld_base
->base
.undef
;
3025 if (bld
->use_immediates_array
) {
3026 unsigned index
= bld
->num_immediates
;
3027 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
3028 LLVMBuilderRef builder
= gallivm
->builder
;
3029 LLVMValueRef gep
[2];
3030 gep
[0] = lp_build_const_int32(gallivm
, 0);
3032 assert(bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
));
3033 for (i
= 0; i
< 4; ++i
) {
3034 gep
[1] = lp_build_const_int32(gallivm
, index
* 4 + i
);
3035 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
3036 bld
->imms_array
, gep
, 2, "");
3037 LLVMBuildStore(builder
, imms
[i
], imm_ptr
);
3040 /* simply copy the immediate values into the next immediates[] slot */
3042 assert(imm
->Immediate
.NrTokens
- 1 <= 4);
3043 assert(bld
->num_immediates
< LP_MAX_INLINED_IMMEDIATES
);
3045 for(i
= 0; i
< 4; ++i
)
3046 bld
->immediates
[bld
->num_immediates
][i
] = imms
[i
];
3048 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3049 unsigned index
= bld
->num_immediates
;
3050 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
3051 LLVMBuilderRef builder
= gallivm
->builder
;
3052 LLVMValueRef gep
[2];
3053 gep
[0] = lp_build_const_int32(gallivm
, 0);
3054 for (i
= 0; i
< 4; ++i
) {
3055 gep
[1] = lp_build_const_int32(gallivm
, index
* 4 + i
);
3056 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
3057 bld
->imms_array
, gep
, 2, "");
3058 LLVMBuildStore(builder
,
3059 bld
->immediates
[index
][i
],
3065 bld
->num_immediates
++;
3070 const struct lp_build_tgsi_action
* action
,
3071 struct lp_build_tgsi_context
* bld_base
,
3072 struct lp_build_emit_data
* emit_data
)
3074 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3076 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
,
3077 &emit_data
->output
[emit_data
->chan
], NULL
);
3082 const struct lp_build_tgsi_action
* action
,
3083 struct lp_build_tgsi_context
* bld_base
,
3084 struct lp_build_emit_data
* emit_data
)
3086 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3088 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
, NULL
,
3089 &emit_data
->output
[emit_data
->chan
]);
3094 const struct lp_build_tgsi_action
* action
,
3095 struct lp_build_tgsi_context
* bld_base
,
3096 struct lp_build_emit_data
* emit_data
)
3098 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3100 emit_kill(bld
, bld_base
->pc
- 1);
3105 const struct lp_build_tgsi_action
* action
,
3106 struct lp_build_tgsi_context
* bld_base
,
3107 struct lp_build_emit_data
* emit_data
)
3109 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3111 emit_kill_if(bld
, emit_data
->inst
, bld_base
->pc
- 1);
3116 const struct lp_build_tgsi_action
* action
,
3117 struct lp_build_tgsi_context
* bld_base
,
3118 struct lp_build_emit_data
* emit_data
)
3120 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3122 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3123 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3128 const struct lp_build_tgsi_action
* action
,
3129 struct lp_build_tgsi_context
* bld_base
,
3130 struct lp_build_emit_data
* emit_data
)
3132 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3134 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3135 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3140 const struct lp_build_tgsi_action
* action
,
3141 struct lp_build_tgsi_context
* bld_base
,
3142 struct lp_build_emit_data
* emit_data
)
3144 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3146 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3147 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3152 const struct lp_build_tgsi_action
* action
,
3153 struct lp_build_tgsi_context
* bld_base
,
3154 struct lp_build_emit_data
* emit_data
)
3156 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3158 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3159 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3164 const struct lp_build_tgsi_action
* action
,
3165 struct lp_build_tgsi_context
* bld_base
,
3166 struct lp_build_emit_data
* emit_data
)
3168 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3170 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3171 emit_data
->output
, 3, LP_SAMPLER_OP_TEXTURE
);
3176 const struct lp_build_tgsi_action
* action
,
3177 struct lp_build_tgsi_context
* bld_base
,
3178 struct lp_build_emit_data
* emit_data
)
3180 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3182 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3183 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3188 const struct lp_build_tgsi_action
* action
,
3189 struct lp_build_tgsi_context
* bld_base
,
3190 struct lp_build_emit_data
* emit_data
)
3192 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3194 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3195 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3200 const struct lp_build_tgsi_action
* action
,
3201 struct lp_build_tgsi_context
* bld_base
,
3202 struct lp_build_emit_data
* emit_data
)
3204 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3206 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_PROJECTED
,
3207 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3212 const struct lp_build_tgsi_action
* action
,
3213 struct lp_build_tgsi_context
* bld_base
,
3214 struct lp_build_emit_data
* emit_data
)
3216 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3218 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3219 emit_data
->output
, 2, LP_SAMPLER_OP_GATHER
);
3224 const struct lp_build_tgsi_action
* action
,
3225 struct lp_build_tgsi_context
* bld_base
,
3226 struct lp_build_emit_data
* emit_data
)
3228 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3230 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3231 emit_data
->output
, 1, LP_SAMPLER_OP_LODQ
);
3236 const struct lp_build_tgsi_action
* action
,
3237 struct lp_build_tgsi_context
* bld_base
,
3238 struct lp_build_emit_data
* emit_data
)
3240 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3242 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3247 const struct lp_build_tgsi_action
* action
,
3248 struct lp_build_tgsi_context
* bld_base
,
3249 struct lp_build_emit_data
* emit_data
)
3251 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3253 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3258 const struct lp_build_tgsi_action
* action
,
3259 struct lp_build_tgsi_context
* bld_base
,
3260 struct lp_build_emit_data
* emit_data
)
3262 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3264 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3269 const struct lp_build_tgsi_action
* action
,
3270 struct lp_build_tgsi_context
* bld_base
,
3271 struct lp_build_emit_data
* emit_data
)
3273 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3275 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3276 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3281 const struct lp_build_tgsi_action
* action
,
3282 struct lp_build_tgsi_context
* bld_base
,
3283 struct lp_build_emit_data
* emit_data
)
3285 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3287 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3288 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3293 const struct lp_build_tgsi_action
* action
,
3294 struct lp_build_tgsi_context
* bld_base
,
3295 struct lp_build_emit_data
* emit_data
)
3297 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3299 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3300 TRUE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3305 const struct lp_build_tgsi_action
* action
,
3306 struct lp_build_tgsi_context
* bld_base
,
3307 struct lp_build_emit_data
* emit_data
)
3309 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3311 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_ZERO
,
3312 TRUE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3317 const struct lp_build_tgsi_action
* action
,
3318 struct lp_build_tgsi_context
* bld_base
,
3319 struct lp_build_emit_data
* emit_data
)
3321 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3323 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3324 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3329 const struct lp_build_tgsi_action
* action
,
3330 struct lp_build_tgsi_context
* bld_base
,
3331 struct lp_build_emit_data
* emit_data
)
3333 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3335 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3336 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3341 const struct lp_build_tgsi_action
* action
,
3342 struct lp_build_tgsi_context
* bld_base
,
3343 struct lp_build_emit_data
* emit_data
)
3345 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3347 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3348 FALSE
, LP_SAMPLER_OP_GATHER
, emit_data
->output
);
3353 const struct lp_build_tgsi_action
* action
,
3354 struct lp_build_tgsi_context
* bld_base
,
3355 struct lp_build_emit_data
* emit_data
)
3357 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3359 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3364 const struct lp_build_tgsi_action
* action
,
3365 struct lp_build_tgsi_context
* bld_base
,
3366 struct lp_build_emit_data
* emit_data
)
3368 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3370 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3371 FALSE
, LP_SAMPLER_OP_LODQ
, emit_data
->output
);
3375 increment_vec_ptr_by_mask(struct lp_build_tgsi_context
* bld_base
,
3379 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3380 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3382 current_vec
= LLVMBuildSub(builder
, current_vec
, mask
, "");
3384 LLVMBuildStore(builder
, current_vec
, ptr
);
3388 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context
* bld_base
,
3392 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3393 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3395 current_vec
= lp_build_select(&bld_base
->uint_bld
,
3397 bld_base
->uint_bld
.zero
,
3400 LLVMBuildStore(builder
, current_vec
, ptr
);
3404 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context
* bld
,
3405 LLVMValueRef current_mask_vec
,
3406 LLVMValueRef total_emitted_vertices_vec
)
3408 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3409 struct lp_build_context
*int_bld
= &bld
->bld_base
.int_bld
;
3410 LLVMValueRef max_mask
= lp_build_cmp(int_bld
, PIPE_FUNC_LESS
,
3411 total_emitted_vertices_vec
,
3412 bld
->max_output_vertices_vec
);
3414 return LLVMBuildAnd(builder
, current_mask_vec
, max_mask
, "");
3419 const struct lp_build_tgsi_action
* action
,
3420 struct lp_build_tgsi_context
* bld_base
,
3421 struct lp_build_emit_data
* emit_data
)
3423 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3424 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3426 if (bld
->gs_iface
->emit_vertex
) {
3427 LLVMValueRef mask
= mask_vec(bld_base
);
3428 LLVMValueRef total_emitted_vertices_vec
=
3429 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3430 mask
= clamp_mask_to_max_output_vertices(bld
, mask
,
3431 total_emitted_vertices_vec
);
3432 gather_outputs(bld
);
3433 bld
->gs_iface
->emit_vertex(bld
->gs_iface
, &bld
->bld_base
,
3435 total_emitted_vertices_vec
);
3436 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3438 increment_vec_ptr_by_mask(bld_base
, bld
->total_emitted_vertices_vec_ptr
,
3441 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3442 " +++ emit vertex masked ones = ",
3444 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3445 " +++ emit vertex emitted = ",
3446 total_emitted_vertices_vec
);
3453 end_primitive_masked(struct lp_build_tgsi_context
* bld_base
,
3456 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3457 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3459 if (bld
->gs_iface
->end_primitive
) {
3460 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3461 LLVMValueRef emitted_vertices_vec
=
3462 LLVMBuildLoad(builder
, bld
->emitted_vertices_vec_ptr
, "");
3463 LLVMValueRef emitted_prims_vec
=
3464 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3466 LLVMValueRef emitted_mask
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3467 emitted_vertices_vec
,
3469 /* We need to combine the current execution mask with the mask
3470 telling us which, if any, execution slots actually have
3471 unemitted primitives, this way we make sure that end_primitives
3472 executes only on the paths that have unflushed vertices */
3473 mask
= LLVMBuildAnd(builder
, mask
, emitted_mask
, "");
3475 bld
->gs_iface
->end_primitive(bld
->gs_iface
, &bld
->bld_base
,
3476 emitted_vertices_vec
,
3480 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3481 " +++ end prim masked ones = ",
3483 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3484 " +++ end prim emitted verts1 = ",
3485 emitted_vertices_vec
);
3486 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3487 " +++ end prim emitted prims1 = ",
3488 LLVMBuildLoad(builder
,
3489 bld
->emitted_prims_vec_ptr
, ""));
3491 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_prims_vec_ptr
,
3493 clear_uint_vec_ptr_from_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3496 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3497 " +++ end prim emitted verts2 = ",
3498 LLVMBuildLoad(builder
,
3499 bld
->emitted_vertices_vec_ptr
, ""));
3507 const struct lp_build_tgsi_action
* action
,
3508 struct lp_build_tgsi_context
* bld_base
,
3509 struct lp_build_emit_data
* emit_data
)
3511 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3513 if (bld
->gs_iface
->end_primitive
) {
3514 LLVMValueRef mask
= mask_vec(bld_base
);
3515 end_primitive_masked(bld_base
, mask
);
3521 const struct lp_build_tgsi_action
* action
,
3522 struct lp_build_tgsi_context
* bld_base
,
3523 struct lp_build_emit_data
* emit_data
)
3525 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3527 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
3533 const struct lp_build_tgsi_action
* action
,
3534 struct lp_build_tgsi_context
* bld_base
,
3535 struct lp_build_emit_data
* emit_data
)
3537 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3539 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
3544 const struct lp_build_tgsi_action
* action
,
3545 struct lp_build_tgsi_context
* bld_base
,
3546 struct lp_build_emit_data
* emit_data
)
3548 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3550 lp_exec_break(&bld
->exec_mask
, bld_base
);
3555 const struct lp_build_tgsi_action
* action
,
3556 struct lp_build_tgsi_context
* bld_base
,
3557 struct lp_build_emit_data
* emit_data
)
3560 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3562 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
3563 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
3564 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3569 const struct lp_build_tgsi_action
* action
,
3570 struct lp_build_tgsi_context
* bld_base
,
3571 struct lp_build_emit_data
* emit_data
)
3574 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3575 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3577 tmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3578 emit_data
->args
[0], uint_bld
->zero
);
3579 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
3584 const struct lp_build_tgsi_action
* action
,
3585 struct lp_build_tgsi_context
* bld_base
,
3586 struct lp_build_emit_data
* emit_data
)
3588 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3590 lp_exec_case(&bld
->exec_mask
, emit_data
->args
[0]);
3595 const struct lp_build_tgsi_action
* action
,
3596 struct lp_build_tgsi_context
* bld_base
,
3597 struct lp_build_emit_data
* emit_data
)
3599 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3601 lp_exec_default(&bld
->exec_mask
, bld_base
);
3606 const struct lp_build_tgsi_action
* action
,
3607 struct lp_build_tgsi_context
* bld_base
,
3608 struct lp_build_emit_data
* emit_data
)
3610 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3612 lp_exec_switch(&bld
->exec_mask
, emit_data
->args
[0]);
3617 const struct lp_build_tgsi_action
* action
,
3618 struct lp_build_tgsi_context
* bld_base
,
3619 struct lp_build_emit_data
* emit_data
)
3621 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3623 lp_exec_endswitch(&bld
->exec_mask
, bld_base
);
3628 const struct lp_build_tgsi_action
* action
,
3629 struct lp_build_tgsi_context
* bld_base
,
3630 struct lp_build_emit_data
* emit_data
)
3632 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3634 lp_exec_bgnloop(&bld
->exec_mask
);
3639 const struct lp_build_tgsi_action
* action
,
3640 struct lp_build_tgsi_context
* bld_base
,
3641 struct lp_build_emit_data
* emit_data
)
3643 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3645 lp_exec_mask_bgnsub(&bld
->exec_mask
);
3650 const struct lp_build_tgsi_action
* action
,
3651 struct lp_build_tgsi_context
* bld_base
,
3652 struct lp_build_emit_data
* emit_data
)
3654 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3656 lp_exec_mask_cond_invert(&bld
->exec_mask
);
3661 const struct lp_build_tgsi_action
* action
,
3662 struct lp_build_tgsi_context
* bld_base
,
3663 struct lp_build_emit_data
* emit_data
)
3665 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3667 lp_exec_mask_cond_pop(&bld
->exec_mask
);
3672 const struct lp_build_tgsi_action
* action
,
3673 struct lp_build_tgsi_context
* bld_base
,
3674 struct lp_build_emit_data
* emit_data
)
3676 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3678 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
3683 const struct lp_build_tgsi_action
* action
,
3684 struct lp_build_tgsi_context
* bld_base
,
3685 struct lp_build_emit_data
* emit_data
)
3687 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3689 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
3694 const struct lp_build_tgsi_action
* action
,
3695 struct lp_build_tgsi_context
* bld_base
,
3696 struct lp_build_emit_data
* emit_data
)
3698 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3700 lp_exec_continue(&bld
->exec_mask
);
3703 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
3705 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3706 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3708 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
3709 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4;
3710 bld
->temps_array
= lp_build_alloca_undef(gallivm
,
3711 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
3715 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
3716 LLVMValueRef array_size
=
3717 lp_build_const_int32(gallivm
,
3718 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
3719 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
3720 bld_base
->base
.vec_type
, array_size
,
3724 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3725 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_IMMEDIATE
] * 4 + 4;
3726 bld
->imms_array
= lp_build_alloca_undef(gallivm
,
3727 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
3731 /* If we have indirect addressing in inputs we need to copy them into
3732 * our alloca array to be able to iterate over them */
3733 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
) && !bld
->gs_iface
) {
3734 unsigned index
, chan
;
3735 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
3736 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
3737 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
3738 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
3739 vec_type
, array_size
,
3742 assert(bld_base
->info
->num_inputs
3743 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
3745 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
3746 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
3747 LLVMValueRef lindex
=
3748 lp_build_const_int32(gallivm
, index
* 4 + chan
);
3749 LLVMValueRef input_ptr
=
3750 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
3752 LLVMValueRef value
= bld
->inputs
[index
][chan
];
3754 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
3759 if (bld
->gs_iface
) {
3760 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
3761 bld
->emitted_prims_vec_ptr
=
3762 lp_build_alloca(gallivm
,
3764 "emitted_prims_ptr");
3765 bld
->emitted_vertices_vec_ptr
=
3766 lp_build_alloca(gallivm
,
3768 "emitted_vertices_ptr");
3769 bld
->total_emitted_vertices_vec_ptr
=
3770 lp_build_alloca(gallivm
,
3772 "total_emitted_vertices_ptr");
3774 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3775 bld
->emitted_prims_vec_ptr
);
3776 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3777 bld
->emitted_vertices_vec_ptr
);
3778 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
3779 bld
->total_emitted_vertices_vec_ptr
);
3782 if (DEBUG_EXECUTION
) {
3783 lp_build_printf(gallivm
, "\n");
3784 emit_dump_file(bld
, TGSI_FILE_CONSTANT
);
3786 emit_dump_file(bld
, TGSI_FILE_INPUT
);
3790 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
3792 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3793 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3795 if (DEBUG_EXECUTION
) {
3798 emit_dump_file(bld
, TGSI_FILE_TEMPORARY
);
3800 emit_dump_file(bld
, TGSI_FILE_OUTPUT
);
3801 lp_build_printf(bld_base
->base
.gallivm
, "\n");
3804 /* If we have indirect addressing in outputs we need to copy our alloca array
3805 * to the outputs slots specified by the caller */
3806 if (bld
->gs_iface
) {
3807 LLVMValueRef total_emitted_vertices_vec
;
3808 LLVMValueRef emitted_prims_vec
;
3809 /* implicit end_primitives, needed in case there are any unflushed
3810 vertices in the cache. Note must not call end_primitive here
3811 since the exec_mask is not valid at this point. */
3812 end_primitive_masked(bld_base
, lp_build_mask_value(bld
->mask
));
3814 total_emitted_vertices_vec
=
3815 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3817 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3819 bld
->gs_iface
->gs_epilogue(bld
->gs_iface
,
3821 total_emitted_vertices_vec
,
3824 gather_outputs(bld
);
3829 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
3830 const struct tgsi_token
*tokens
,
3831 struct lp_type type
,
3832 struct lp_build_mask_context
*mask
,
3833 LLVMValueRef consts_ptr
,
3834 LLVMValueRef const_sizes_ptr
,
3835 const struct lp_bld_tgsi_system_values
*system_values
,
3836 const LLVMValueRef (*inputs
)[TGSI_NUM_CHANNELS
],
3837 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
],
3838 LLVMValueRef context_ptr
,
3839 LLVMValueRef thread_data_ptr
,
3840 const struct lp_build_sampler_soa
*sampler
,
3841 const struct tgsi_shader_info
*info
,
3842 const struct lp_build_tgsi_gs_iface
*gs_iface
,
3843 LLVMValueRef ssbo_ptr
,
3844 LLVMValueRef ssbo_sizes_ptr
)
3846 struct lp_build_tgsi_soa_context bld
;
3848 struct lp_type res_type
;
3850 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
3851 memset(&res_type
, 0, sizeof res_type
);
3852 res_type
.width
= type
.width
;
3853 res_type
.length
= type
.length
;
3856 /* Setup build context */
3857 memset(&bld
, 0, sizeof bld
);
3858 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
3859 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
3860 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
3861 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
3863 struct lp_type dbl_type
;
3865 dbl_type
.width
*= 2;
3866 lp_build_context_init(&bld
.bld_base
.dbl_bld
, gallivm
, dbl_type
);
3869 struct lp_type uint64_type
;
3870 uint64_type
= lp_uint_type(type
);
3871 uint64_type
.width
*= 2;
3872 lp_build_context_init(&bld
.bld_base
.uint64_bld
, gallivm
, uint64_type
);
3875 struct lp_type int64_type
;
3876 int64_type
= lp_int_type(type
);
3877 int64_type
.width
*= 2;
3878 lp_build_context_init(&bld
.bld_base
.int64_bld
, gallivm
, int64_type
);
3881 bld
.inputs
= inputs
;
3882 bld
.outputs
= outputs
;
3883 bld
.consts_ptr
= consts_ptr
;
3884 bld
.const_sizes_ptr
= const_sizes_ptr
;
3885 bld
.ssbo_ptr
= ssbo_ptr
;
3886 bld
.ssbo_sizes_ptr
= ssbo_sizes_ptr
;
3887 bld
.sampler
= sampler
;
3888 bld
.bld_base
.info
= info
;
3889 bld
.indirect_files
= info
->indirect_files
;
3890 bld
.context_ptr
= context_ptr
;
3891 bld
.thread_data_ptr
= thread_data_ptr
;
3894 * If the number of temporaries is rather large then we just
3895 * allocate them as an array right from the start and treat
3896 * like indirect temporaries.
3898 if (info
->file_max
[TGSI_FILE_TEMPORARY
] >= LP_MAX_INLINED_TEMPS
) {
3899 bld
.indirect_files
|= (1 << TGSI_FILE_TEMPORARY
);
3902 * For performance reason immediates are always backed in a static
3903 * array, but if their number is too great, we have to use just
3904 * a dynamically allocated array.
3906 bld
.use_immediates_array
=
3907 (info
->file_max
[TGSI_FILE_IMMEDIATE
] >= LP_MAX_INLINED_IMMEDIATES
);
3908 if (bld
.use_immediates_array
) {
3909 bld
.indirect_files
|= (1 << TGSI_FILE_IMMEDIATE
);
3913 bld
.bld_base
.soa
= TRUE
;
3914 bld
.bld_base
.emit_debug
= emit_debug
;
3915 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
3916 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
3917 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
3918 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
3919 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
3920 bld
.bld_base
.emit_store
= emit_store
;
3922 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
3923 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
3925 bld
.bld_base
.emit_prologue
= emit_prologue
;
3926 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
3928 /* Set opcode actions */
3929 lp_set_default_actions_cpu(&bld
.bld_base
);
3931 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
3932 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
3933 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
3934 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
3935 bld
.bld_base
.op_actions
[TGSI_OPCODE_CASE
].emit
= case_emit
;
3936 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
3937 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
3938 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
3939 bld
.bld_base
.op_actions
[TGSI_OPCODE_DEFAULT
].emit
= default_emit
;
3940 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
3941 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
3942 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
3943 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
3944 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSWITCH
].emit
= endswitch_emit
;
3945 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
3946 bld
.bld_base
.op_actions
[TGSI_OPCODE_UIF
].emit
= uif_emit
;
3947 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL_IF
].emit
= kill_if_emit
;
3948 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL
].emit
= kill_emit
;
3949 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
3950 bld
.bld_base
.op_actions
[TGSI_OPCODE_SWITCH
].emit
= switch_emit
;
3951 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
3952 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
3953 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
3954 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
3955 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
3956 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
3957 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
3958 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX2
].emit
= tex2_emit
;
3959 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB2
].emit
= txb2_emit
;
3960 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL2
].emit
= txl2_emit
;
3961 bld
.bld_base
.op_actions
[TGSI_OPCODE_TG4
].emit
= tg4_emit
;
3962 bld
.bld_base
.op_actions
[TGSI_OPCODE_LODQ
].emit
= lodq_emit
;
3963 /* DX10 sampling ops */
3964 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE
].emit
= sample_emit
;
3965 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_B
].emit
= sample_b_emit
;
3966 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C
].emit
= sample_c_emit
;
3967 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C_LZ
].emit
= sample_c_lz_emit
;
3968 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_D
].emit
= sample_d_emit
;
3969 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I
].emit
= sample_i_emit
;
3970 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I_MS
].emit
= sample_i_emit
;
3971 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_L
].emit
= sample_l_emit
;
3972 bld
.bld_base
.op_actions
[TGSI_OPCODE_GATHER4
].emit
= gather4_emit
;
3973 bld
.bld_base
.op_actions
[TGSI_OPCODE_SVIEWINFO
].emit
= sviewinfo_emit
;
3974 bld
.bld_base
.op_actions
[TGSI_OPCODE_LOD
].emit
= lod_emit
;
3978 /* There's no specific value for this because it should always
3979 * be set, but apps using ext_geometry_shader4 quite often
3980 * were forgetting so we're using MAX_VERTEX_VARYING from
3981 * that spec even though we could debug_assert if it's not
3982 * set, but that's a lot uglier. */
3983 uint max_output_vertices
;
3985 /* inputs are always indirect with gs */
3986 bld
.indirect_files
|= (1 << TGSI_FILE_INPUT
);
3987 bld
.gs_iface
= gs_iface
;
3988 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_gs_input
;
3989 bld
.bld_base
.op_actions
[TGSI_OPCODE_EMIT
].emit
= emit_vertex
;
3990 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDPRIM
].emit
= end_primitive
;
3992 max_output_vertices
=
3993 info
->properties
[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
];
3994 if (!max_output_vertices
)
3995 max_output_vertices
= 32;
3997 bld
.max_output_vertices_vec
=
3998 lp_build_const_int_vec(gallivm
, bld
.bld_base
.int_bld
.type
,
3999 max_output_vertices
);
4002 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.int_bld
);
4004 bld
.system_values
= *system_values
;
4006 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
4009 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
4010 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
4011 debug_printf("11111111111111111111111111111 \n");
4012 tgsi_dump(tokens
, 0);
4013 lp_debug_dump_value(function
);
4014 debug_printf("2222222222222222222222222222 \n");
4018 LLVMModuleRef module
= LLVMGetGlobalParent(
4019 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
4020 LLVMDumpModule(module
);
4023 lp_exec_mask_fini(&bld
.exec_mask
);