1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "util/u_prim.h"
45 #include "tgsi/tgsi_dump.h"
46 #include "tgsi/tgsi_exec.h"
47 #include "tgsi/tgsi_info.h"
48 #include "tgsi/tgsi_parse.h"
49 #include "tgsi/tgsi_util.h"
50 #include "tgsi/tgsi_scan.h"
51 #include "tgsi/tgsi_strings.h"
52 #include "lp_bld_tgsi_action.h"
53 #include "lp_bld_type.h"
54 #include "lp_bld_const.h"
55 #include "lp_bld_arit.h"
56 #include "lp_bld_bitarit.h"
57 #include "lp_bld_gather.h"
58 #include "lp_bld_init.h"
59 #include "lp_bld_logic.h"
60 #include "lp_bld_misc.h"
61 #include "lp_bld_swizzle.h"
62 #include "lp_bld_flow.h"
63 #include "lp_bld_quad.h"
64 #include "lp_bld_tgsi.h"
65 #include "lp_bld_limits.h"
66 #include "lp_bld_debug.h"
67 #include "lp_bld_printf.h"
68 #include "lp_bld_sample.h"
69 #include "lp_bld_struct.h"
71 /* SM 4.0 says that subroutines can nest 32 deep and
72 * we need one more for our main function */
73 #define LP_MAX_NUM_FUNCS 33
75 #define DUMP_GS_EMITS 0
78 * If non-zero, the generated LLVM IR will print intermediate results on every TGSI
82 * - take execution masks in consideration
83 * - debug control-flow instructions
85 #define DEBUG_EXECUTION 0
89 * Emit code to print a register value.
92 emit_dump_reg(struct gallivm_state
*gallivm
,
100 snprintf(buf
, sizeof buf
, " %s[%u].%c = ",
101 tgsi_file_name(file
),
102 index
, "xyzw"[chan
]);
104 lp_build_print_value(gallivm
, buf
, value
);
108 * Return the context for the current function.
109 * (always 'main', if shader doesn't do any function calls)
111 static inline struct function_ctx
*
112 func_ctx(struct lp_exec_mask
*mask
)
114 assert(mask
->function_stack_size
> 0);
115 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
116 return &mask
->function_stack
[mask
->function_stack_size
- 1];
120 * Returns true if we're in a loop.
121 * It's global, meaning that it returns true even if there's
122 * no loop inside the current function, but we were inside
123 * a loop inside another function, from which this one was called.
125 static inline boolean
126 mask_has_loop(struct lp_exec_mask
*mask
)
129 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
130 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
131 if (ctx
->loop_stack_size
> 0)
138 * combine the execution mask if there is one with the current mask.
141 mask_vec(struct lp_build_tgsi_context
*bld_base
)
143 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
144 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
145 struct lp_exec_mask
*exec_mask
= &bld
->exec_mask
;
146 LLVMValueRef bld_mask
= bld
->mask
? lp_build_mask_value(bld
->mask
) : NULL
;
147 if (!exec_mask
->has_mask
) {
151 return exec_mask
->exec_mask
;
152 return LLVMBuildAnd(builder
, lp_build_mask_value(bld
->mask
),
153 exec_mask
->exec_mask
, "");
157 * Returns true if we're inside a switch statement.
158 * It's global, meaning that it returns true even if there's
159 * no switch in the current function, but we were inside
160 * a switch inside another function, from which this one was called.
162 static inline boolean
163 mask_has_switch(struct lp_exec_mask
*mask
)
166 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
167 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
168 if (ctx
->switch_stack_size
> 0)
175 * Returns true if we're inside a conditional.
176 * It's global, meaning that it returns true even if there's
177 * no conditional in the current function, but we were inside
178 * a conditional inside another function, from which this one was called.
180 static inline boolean
181 mask_has_cond(struct lp_exec_mask
*mask
)
184 for (i
= mask
->function_stack_size
- 1; i
>= 0; --i
) {
185 const struct function_ctx
*ctx
= &mask
->function_stack
[i
];
186 if (ctx
->cond_stack_size
> 0)
194 * Initialize a function context at the specified index.
197 lp_exec_mask_function_init(struct lp_exec_mask
*mask
, int function_idx
)
199 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
200 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
201 struct function_ctx
*ctx
= &mask
->function_stack
[function_idx
];
203 ctx
->cond_stack_size
= 0;
204 ctx
->loop_stack_size
= 0;
205 ctx
->switch_stack_size
= 0;
207 if (function_idx
== 0) {
208 ctx
->ret_mask
= mask
->ret_mask
;
211 ctx
->loop_limiter
= lp_build_alloca(mask
->bld
->gallivm
,
212 int_type
, "looplimiter");
215 LLVMConstInt(int_type
, LP_MAX_TGSI_LOOP_ITERATIONS
, false),
219 static void lp_exec_mask_init(struct lp_exec_mask
*mask
, struct lp_build_context
*bld
)
222 mask
->has_mask
= FALSE
;
223 mask
->ret_in_main
= FALSE
;
224 /* For the main function */
225 mask
->function_stack_size
= 1;
227 mask
->int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, mask
->bld
->type
);
228 mask
->exec_mask
= mask
->ret_mask
= mask
->break_mask
= mask
->cont_mask
=
229 mask
->cond_mask
= mask
->switch_mask
=
230 LLVMConstAllOnes(mask
->int_vec_type
);
232 mask
->function_stack
= CALLOC(LP_MAX_NUM_FUNCS
,
233 sizeof(mask
->function_stack
[0]));
234 lp_exec_mask_function_init(mask
, 0);
238 lp_exec_mask_fini(struct lp_exec_mask
*mask
)
240 FREE(mask
->function_stack
);
243 static void lp_exec_mask_update(struct lp_exec_mask
*mask
)
245 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
246 boolean has_loop_mask
= mask_has_loop(mask
);
247 boolean has_cond_mask
= mask_has_cond(mask
);
248 boolean has_switch_mask
= mask_has_switch(mask
);
249 boolean has_ret_mask
= mask
->function_stack_size
> 1 ||
253 /*for loops we need to update the entire mask at runtime */
255 assert(mask
->break_mask
);
256 tmp
= LLVMBuildAnd(builder
,
260 mask
->exec_mask
= LLVMBuildAnd(builder
,
265 mask
->exec_mask
= mask
->cond_mask
;
267 if (has_switch_mask
) {
268 mask
->exec_mask
= LLVMBuildAnd(builder
,
275 mask
->exec_mask
= LLVMBuildAnd(builder
,
281 mask
->has_mask
= (has_cond_mask
||
287 static void lp_exec_mask_cond_push(struct lp_exec_mask
*mask
,
290 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
291 struct function_ctx
*ctx
= func_ctx(mask
);
293 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
) {
294 ctx
->cond_stack_size
++;
297 if (ctx
->cond_stack_size
== 0 && mask
->function_stack_size
== 1) {
298 assert(mask
->cond_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
300 ctx
->cond_stack
[ctx
->cond_stack_size
++] = mask
->cond_mask
;
301 assert(LLVMTypeOf(val
) == mask
->int_vec_type
);
302 mask
->cond_mask
= LLVMBuildAnd(builder
,
306 lp_exec_mask_update(mask
);
309 static void lp_exec_mask_cond_invert(struct lp_exec_mask
*mask
)
311 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
312 struct function_ctx
*ctx
= func_ctx(mask
);
313 LLVMValueRef prev_mask
;
314 LLVMValueRef inv_mask
;
316 assert(ctx
->cond_stack_size
);
317 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
319 prev_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
- 1];
320 if (ctx
->cond_stack_size
== 1 && mask
->function_stack_size
== 1) {
321 assert(prev_mask
== LLVMConstAllOnes(mask
->int_vec_type
));
324 inv_mask
= LLVMBuildNot(builder
, mask
->cond_mask
, "");
326 mask
->cond_mask
= LLVMBuildAnd(builder
,
329 lp_exec_mask_update(mask
);
332 static void lp_exec_mask_cond_pop(struct lp_exec_mask
*mask
)
334 struct function_ctx
*ctx
= func_ctx(mask
);
335 assert(ctx
->cond_stack_size
);
336 --ctx
->cond_stack_size
;
337 if (ctx
->cond_stack_size
>= LP_MAX_TGSI_NESTING
)
339 mask
->cond_mask
= ctx
->cond_stack
[ctx
->cond_stack_size
];
340 lp_exec_mask_update(mask
);
343 static void lp_exec_bgnloop(struct lp_exec_mask
*mask
)
345 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
346 struct function_ctx
*ctx
= func_ctx(mask
);
348 if (ctx
->loop_stack_size
>= LP_MAX_TGSI_NESTING
) {
349 ++ctx
->loop_stack_size
;
353 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
355 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_LOOP
;
357 ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
= ctx
->loop_block
;
358 ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
= mask
->cont_mask
;
359 ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
= mask
->break_mask
;
360 ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
= ctx
->break_var
;
361 ++ctx
->loop_stack_size
;
363 ctx
->break_var
= lp_build_alloca(mask
->bld
->gallivm
, mask
->int_vec_type
, "");
364 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
366 ctx
->loop_block
= lp_build_insert_new_block(mask
->bld
->gallivm
, "bgnloop");
368 LLVMBuildBr(builder
, ctx
->loop_block
);
369 LLVMPositionBuilderAtEnd(builder
, ctx
->loop_block
);
371 mask
->break_mask
= LLVMBuildLoad(builder
, ctx
->break_var
, "");
373 lp_exec_mask_update(mask
);
376 static void lp_exec_break(struct lp_exec_mask
*mask
,
377 struct lp_build_tgsi_context
* bld_base
)
379 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
380 struct function_ctx
*ctx
= func_ctx(mask
);
382 if (ctx
->break_type
== LP_EXEC_MASK_BREAK_TYPE_LOOP
) {
383 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
387 mask
->break_mask
= LLVMBuildAnd(builder
,
389 exec_mask
, "break_full");
392 enum tgsi_opcode opcode
=
393 bld_base
->instructions
[bld_base
->pc
+ 1].Instruction
.Opcode
;
394 boolean break_always
= (opcode
== TGSI_OPCODE_ENDSWITCH
||
395 opcode
== TGSI_OPCODE_CASE
);
398 if (ctx
->switch_in_default
) {
400 * stop default execution but only if this is an unconditional switch.
401 * (The condition here is not perfect since dead code after break is
402 * allowed but should be sufficient since false negatives are just
403 * unoptimized - so we don't have to pre-evaluate that).
405 if(break_always
&& ctx
->switch_pc
) {
406 bld_base
->pc
= ctx
->switch_pc
;
412 mask
->switch_mask
= LLVMConstNull(mask
->bld
->int_vec_type
);
415 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
418 mask
->switch_mask
= LLVMBuildAnd(builder
,
420 exec_mask
, "break_switch");
424 lp_exec_mask_update(mask
);
427 static void lp_exec_continue(struct lp_exec_mask
*mask
)
429 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
430 LLVMValueRef exec_mask
= LLVMBuildNot(builder
,
434 mask
->cont_mask
= LLVMBuildAnd(builder
,
438 lp_exec_mask_update(mask
);
442 static void lp_exec_endloop(struct gallivm_state
*gallivm
,
443 struct lp_exec_mask
*mask
)
445 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
446 struct function_ctx
*ctx
= func_ctx(mask
);
447 LLVMBasicBlockRef endloop
;
448 LLVMTypeRef int_type
= LLVMInt32TypeInContext(mask
->bld
->gallivm
->context
);
449 LLVMTypeRef reg_type
= LLVMIntTypeInContext(gallivm
->context
,
450 mask
->bld
->type
.width
*
451 mask
->bld
->type
.length
);
452 LLVMValueRef i1cond
, i2cond
, icond
, limiter
;
454 assert(mask
->break_mask
);
457 assert(ctx
->loop_stack_size
);
458 if (ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
459 --ctx
->loop_stack_size
;
464 * Restore the cont_mask, but don't pop
466 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
- 1].cont_mask
;
467 lp_exec_mask_update(mask
);
470 * Unlike the continue mask, the break_mask must be preserved across loop
473 LLVMBuildStore(builder
, mask
->break_mask
, ctx
->break_var
);
475 /* Decrement the loop limiter */
476 limiter
= LLVMBuildLoad(builder
, ctx
->loop_limiter
, "");
478 limiter
= LLVMBuildSub(
481 LLVMConstInt(int_type
, 1, false),
484 LLVMBuildStore(builder
, limiter
, ctx
->loop_limiter
);
486 /* i1cond = (mask != 0) */
487 i1cond
= LLVMBuildICmp(
490 LLVMBuildBitCast(builder
, mask
->exec_mask
, reg_type
, ""),
491 LLVMConstNull(reg_type
), "i1cond");
493 /* i2cond = (looplimiter > 0) */
494 i2cond
= LLVMBuildICmp(
498 LLVMConstNull(int_type
), "i2cond");
500 /* if( i1cond && i2cond ) */
501 icond
= LLVMBuildAnd(builder
, i1cond
, i2cond
, "");
503 endloop
= lp_build_insert_new_block(mask
->bld
->gallivm
, "endloop");
505 LLVMBuildCondBr(builder
,
506 icond
, ctx
->loop_block
, endloop
);
508 LLVMPositionBuilderAtEnd(builder
, endloop
);
510 assert(ctx
->loop_stack_size
);
511 --ctx
->loop_stack_size
;
512 mask
->cont_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].cont_mask
;
513 mask
->break_mask
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_mask
;
514 ctx
->loop_block
= ctx
->loop_stack
[ctx
->loop_stack_size
].loop_block
;
515 ctx
->break_var
= ctx
->loop_stack
[ctx
->loop_stack_size
].break_var
;
516 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+
517 ctx
->switch_stack_size
];
519 lp_exec_mask_update(mask
);
522 static void lp_exec_switch(struct lp_exec_mask
*mask
,
523 LLVMValueRef switchval
)
525 struct function_ctx
*ctx
= func_ctx(mask
);
527 if (ctx
->switch_stack_size
>= LP_MAX_TGSI_NESTING
||
528 ctx
->loop_stack_size
> LP_MAX_TGSI_NESTING
) {
529 ctx
->switch_stack_size
++;
533 ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
] =
535 ctx
->break_type
= LP_EXEC_MASK_BREAK_TYPE_SWITCH
;
537 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
= mask
->switch_mask
;
538 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
= ctx
->switch_val
;
539 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
= ctx
->switch_mask_default
;
540 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
= ctx
->switch_in_default
;
541 ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
= ctx
->switch_pc
;
542 ctx
->switch_stack_size
++;
544 mask
->switch_mask
= LLVMConstNull(mask
->int_vec_type
);
545 ctx
->switch_val
= switchval
;
546 ctx
->switch_mask_default
= LLVMConstNull(mask
->int_vec_type
);
547 ctx
->switch_in_default
= false;
550 lp_exec_mask_update(mask
);
553 static void lp_exec_endswitch(struct lp_exec_mask
*mask
,
554 struct lp_build_tgsi_context
* bld_base
)
556 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
557 struct function_ctx
*ctx
= func_ctx(mask
);
559 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
560 ctx
->switch_stack_size
--;
564 /* check if there's deferred default if so do it now */
565 if (ctx
->switch_pc
&& !ctx
->switch_in_default
) {
566 LLVMValueRef prevmask
, defaultmask
;
568 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
569 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
570 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
571 ctx
->switch_in_default
= true;
573 lp_exec_mask_update(mask
);
575 assert(bld_base
->instructions
[ctx
->switch_pc
- 1].Instruction
.Opcode
==
576 TGSI_OPCODE_DEFAULT
);
578 tmp_pc
= bld_base
->pc
;
579 bld_base
->pc
= ctx
->switch_pc
;
581 * re-purpose switch_pc to point to here again, since we stop execution of
582 * the deferred default after next break.
584 ctx
->switch_pc
= tmp_pc
- 1;
589 else if (ctx
->switch_pc
&& ctx
->switch_in_default
) {
590 assert(bld_base
->pc
== ctx
->switch_pc
+ 1);
593 ctx
->switch_stack_size
--;
594 mask
->switch_mask
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask
;
595 ctx
->switch_val
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_val
;
596 ctx
->switch_mask_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_mask_default
;
597 ctx
->switch_in_default
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_in_default
;
598 ctx
->switch_pc
= ctx
->switch_stack
[ctx
->switch_stack_size
].switch_pc
;
600 ctx
->break_type
= ctx
->break_type_stack
[ctx
->loop_stack_size
+ ctx
->switch_stack_size
];
602 lp_exec_mask_update(mask
);
605 static void lp_exec_case(struct lp_exec_mask
*mask
,
606 LLVMValueRef caseval
)
608 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
609 struct function_ctx
*ctx
= func_ctx(mask
);
611 LLVMValueRef casemask
, prevmask
;
613 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
617 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
618 if (!ctx
->switch_in_default
) {
619 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
620 casemask
= lp_build_cmp(mask
->bld
, PIPE_FUNC_EQUAL
, caseval
, ctx
->switch_val
);
621 ctx
->switch_mask_default
= LLVMBuildOr(builder
, casemask
,
622 ctx
->switch_mask_default
, "sw_default_mask");
623 casemask
= LLVMBuildOr(builder
, casemask
, mask
->switch_mask
, "");
624 mask
->switch_mask
= LLVMBuildAnd(builder
, casemask
, prevmask
, "sw_mask");
626 lp_exec_mask_update(mask
);
631 * Analyse default statement in a switch.
632 * \return true if default is last statement, false otherwise
633 * \param default_pc_start contains pc of instruction to jump to
634 * if default wasn't last but there's no
635 * fallthrough into default.
637 static boolean
default_analyse_is_last(struct lp_exec_mask
*mask
,
638 struct lp_build_tgsi_context
* bld_base
,
639 int *default_pc_start
)
641 unsigned pc
= bld_base
->pc
;
642 struct function_ctx
*ctx
= func_ctx(mask
);
643 int curr_switch_stack
= ctx
->switch_stack_size
;
645 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
649 /* skip over case statements which are together with default */
650 while (bld_base
->instructions
[pc
].Instruction
.Opcode
== TGSI_OPCODE_CASE
) {
654 while (pc
!= ~0u && pc
< bld_base
->num_instructions
) {
655 enum tgsi_opcode opcode
= bld_base
->instructions
[pc
].Instruction
.Opcode
;
657 case TGSI_OPCODE_CASE
:
658 if (curr_switch_stack
== ctx
->switch_stack_size
) {
659 *default_pc_start
= pc
- 1;
663 case TGSI_OPCODE_SWITCH
:
666 case TGSI_OPCODE_ENDSWITCH
:
667 if (curr_switch_stack
== ctx
->switch_stack_size
) {
668 *default_pc_start
= pc
- 1;
678 /* should never arrive here */
683 static void lp_exec_default(struct lp_exec_mask
*mask
,
684 struct lp_build_tgsi_context
* bld_base
)
686 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
687 struct function_ctx
*ctx
= func_ctx(mask
);
690 boolean default_is_last
;
692 if (ctx
->switch_stack_size
> LP_MAX_TGSI_NESTING
) {
697 * This is a messy opcode, because it may not be always at the end and
698 * there can be fallthrough in and out of it.
701 default_is_last
= default_analyse_is_last(mask
, bld_base
, &default_exec_pc
);
703 * If it is last statement in switch (note that case statements appearing
704 * "at the same time" as default don't change that) everything is just fine,
705 * update switch mask and go on. This means we can handle default with
706 * fallthrough INTO it without overhead, if it is last.
708 if (default_is_last
) {
709 LLVMValueRef prevmask
, defaultmask
;
710 prevmask
= ctx
->switch_stack
[ctx
->switch_stack_size
- 1].switch_mask
;
711 defaultmask
= LLVMBuildNot(builder
, ctx
->switch_mask_default
, "sw_default_mask");
712 defaultmask
= LLVMBuildOr(builder
, defaultmask
, mask
->switch_mask
, "");
713 mask
->switch_mask
= LLVMBuildAnd(builder
, prevmask
, defaultmask
, "sw_mask");
714 ctx
->switch_in_default
= true;
716 lp_exec_mask_update(mask
);
720 * Technically, "case" immediately before default isn't really a
721 * fallthrough, however we still have to count them as such as we
722 * already have updated the masks.
723 * If that happens in practice could add a switch optimizer pass
724 * which just gets rid of all case statements appearing together with
725 * default (or could do switch analysis at switch start time instead).
727 enum tgsi_opcode opcode
=
728 bld_base
->instructions
[bld_base
->pc
- 1].Instruction
.Opcode
;
729 boolean ft_into
= (opcode
!= TGSI_OPCODE_BRK
&&
730 opcode
!= TGSI_OPCODE_SWITCH
);
732 * If it is not last statement and there was no fallthrough into it,
733 * we record the PC and continue execution at next case (again, those
734 * case encountered at the same time don't count). At endswitch
735 * time, we update switchmask, and go back executing the code we skipped
736 * until the next break (possibly re-executing some code with changed mask
737 * if there was a fallthrough out of default).
738 * Finally, if it is not last statement and there was a fallthrough into it,
739 * do the same as with the former case, except instead of skipping the code
740 * just execute it without updating the mask, then go back and re-execute.
742 ctx
->switch_pc
= bld_base
->pc
;
744 bld_base
->pc
= default_exec_pc
;
750 /* stores val into an address pointed to by dst_ptr.
751 * mask->exec_mask is used to figure out which bits of val
752 * should be stored into the address
753 * (0 means don't store this bit, 1 means do store).
755 static void lp_exec_mask_store(struct lp_exec_mask
*mask
,
756 struct lp_build_context
*bld_store
,
758 LLVMValueRef dst_ptr
)
760 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
761 LLVMValueRef exec_mask
= mask
->has_mask
? mask
->exec_mask
: NULL
;
763 assert(lp_check_value(bld_store
->type
, val
));
764 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr
)) == LLVMPointerTypeKind
);
765 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr
)) == LLVMTypeOf(val
) ||
766 LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(dst_ptr
))) == LLVMArrayTypeKind
);
769 LLVMValueRef res
, dst
;
771 dst
= LLVMBuildLoad(builder
, dst_ptr
, "");
772 res
= lp_build_select(bld_store
, exec_mask
, val
, dst
);
773 LLVMBuildStore(builder
, res
, dst_ptr
);
775 LLVMBuildStore(builder
, val
, dst_ptr
);
778 static void lp_exec_mask_call(struct lp_exec_mask
*mask
,
782 if (mask
->function_stack_size
>= LP_MAX_NUM_FUNCS
) {
786 lp_exec_mask_function_init(mask
, mask
->function_stack_size
);
787 mask
->function_stack
[mask
->function_stack_size
].pc
= *pc
;
788 mask
->function_stack
[mask
->function_stack_size
].ret_mask
= mask
->ret_mask
;
789 mask
->function_stack_size
++;
793 static void lp_exec_mask_ret(struct lp_exec_mask
*mask
, int *pc
)
795 LLVMBuilderRef builder
= mask
->bld
->gallivm
->builder
;
796 struct function_ctx
*ctx
= func_ctx(mask
);
797 LLVMValueRef exec_mask
;
799 if (ctx
->cond_stack_size
== 0 &&
800 ctx
->loop_stack_size
== 0 &&
801 ctx
->switch_stack_size
== 0 &&
802 mask
->function_stack_size
== 1) {
803 /* returning from main() */
808 if (mask
->function_stack_size
== 1) {
810 * This requires special handling since we need to ensure
811 * we don't drop the mask even if we have no call stack
812 * (e.g. after a ret in a if clause after the endif)
814 mask
->ret_in_main
= TRUE
;
817 exec_mask
= LLVMBuildNot(builder
,
821 mask
->ret_mask
= LLVMBuildAnd(builder
,
823 exec_mask
, "ret_full");
825 lp_exec_mask_update(mask
);
/* Handle TGSI BGNSUB: nothing to do, state was set up at call time. */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
832 static void lp_exec_mask_endsub(struct lp_exec_mask
*mask
, int *pc
)
834 struct function_ctx
*ctx
;
836 assert(mask
->function_stack_size
> 1);
837 assert(mask
->function_stack_size
<= LP_MAX_NUM_FUNCS
);
839 ctx
= func_ctx(mask
);
840 mask
->function_stack_size
--;
843 mask
->ret_mask
= ctx
->ret_mask
;
845 lp_exec_mask_update(mask
);
850 get_file_ptr(struct lp_build_tgsi_soa_context
*bld
,
855 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
856 LLVMValueRef (*array_of_vars
)[TGSI_NUM_CHANNELS
];
857 LLVMValueRef var_of_array
;
860 case TGSI_FILE_TEMPORARY
:
861 array_of_vars
= bld
->temps
;
862 var_of_array
= bld
->temps_array
;
864 case TGSI_FILE_OUTPUT
:
865 array_of_vars
= bld
->outputs
;
866 var_of_array
= bld
->outputs_array
;
875 if (bld
->indirect_files
& (1 << file
)) {
876 LLVMValueRef lindex
= lp_build_const_int32(bld
->bld_base
.base
.gallivm
, index
* 4 + chan
);
877 if (LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(var_of_array
))) == LLVMArrayTypeKind
) {
879 gep
[0] = lp_build_const_int32(bld
->bld_base
.base
.gallivm
, 0);
881 return LLVMBuildGEP(builder
, var_of_array
, gep
, 2, "");
883 return LLVMBuildGEP(builder
, var_of_array
, &lindex
, 1, "");
887 assert(index
<= bld
->bld_base
.info
->file_max
[file
]);
888 return array_of_vars
[index
][chan
];
894 * Return pointer to a temporary register channel (src or dest).
895 * Note that indirect addressing cannot be handled here.
896 * \param index which temporary register
897 * \param chan which channel of the temp register.
900 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context
*bld
,
904 return get_file_ptr(bld
, TGSI_FILE_TEMPORARY
, index
, chan
);
908 * Return pointer to a output register channel (src or dest).
909 * Note that indirect addressing cannot be handled here.
910 * \param index which output register
911 * \param chan which channel of the output register.
914 lp_get_output_ptr(struct lp_build_tgsi_soa_context
*bld
,
918 return get_file_ptr(bld
, TGSI_FILE_OUTPUT
, index
, chan
);
922 * If we have indirect addressing in outputs copy our alloca array
923 * to the outputs slots specified by the caller to make sure
924 * our outputs are delivered consistently via the same interface.
927 gather_outputs(struct lp_build_tgsi_soa_context
* bld
)
929 if ((bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
930 unsigned index
, chan
;
931 assert(bld
->bld_base
.info
->num_outputs
<=
932 bld
->bld_base
.info
->file_max
[TGSI_FILE_OUTPUT
] + 1);
933 for (index
= 0; index
< bld
->bld_base
.info
->num_outputs
; ++index
) {
934 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
935 bld
->outputs
[index
][chan
] = lp_get_output_ptr(bld
, index
, chan
);
943 * XXX the lp_build_gather() function should be capable of doing this
944 * with a little work.
947 build_gather(struct lp_build_tgsi_context
*bld_base
,
948 LLVMValueRef base_ptr
,
949 LLVMValueRef indexes
,
950 LLVMValueRef overflow_mask
,
951 LLVMValueRef indexes2
)
953 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
954 LLVMBuilderRef builder
= gallivm
->builder
;
955 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
956 struct lp_build_context
*bld
= &bld_base
->base
;
961 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
965 * overflow_mask is a vector telling us which channels
966 * in the vector overflowed. We use the overflow behavior for
967 * constant buffers which is defined as:
968 * Out of bounds access to constant buffer returns 0 in all
969 * components. Out of bounds behavior is always with respect
970 * to the size of the buffer bound at that slot.
975 * We avoid per-element control flow here (also due to llvm going crazy,
976 * though I suspect it's better anyway since overflow is likely rare).
977 * Note that since we still fetch from buffers even if num_elements was
978 * zero (in this case we'll fetch from index zero) the jit func callers
979 * MUST provide valid fake constant buffers of size 4x32 (the values do
980 * not matter), otherwise we'd still need (not per element though)
983 indexes
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes
);
985 indexes2
= lp_build_select(uint_bld
, overflow_mask
, uint_bld
->zero
, indexes2
);
989 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
991 for (i
= 0; i
< bld
->type
.length
* (indexes2
? 2 : 1); i
++) {
994 LLVMValueRef scalar_ptr
, scalar
;
996 di
= lp_build_const_int32(bld
->gallivm
, i
);
998 si
= lp_build_const_int32(bld
->gallivm
, i
>> 1);
1002 if (indexes2
&& (i
& 1)) {
1003 index
= LLVMBuildExtractElement(builder
,
1006 index
= LLVMBuildExtractElement(builder
,
1009 scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
,
1010 &index
, 1, "gather_ptr");
1011 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1013 res
= LLVMBuildInsertElement(builder
, res
, scalar
, di
, "");
1016 if (overflow_mask
) {
1018 res
= LLVMBuildBitCast(builder
, res
, bld_base
->dbl_bld
.vec_type
, "");
1019 overflow_mask
= LLVMBuildSExt(builder
, overflow_mask
,
1020 bld_base
->dbl_bld
.int_vec_type
, "");
1021 res
= lp_build_select(&bld_base
->dbl_bld
, overflow_mask
,
1022 bld_base
->dbl_bld
.zero
, res
);
1024 res
= lp_build_select(bld
, overflow_mask
, bld
->zero
, res
);
1032 * Scatter/store vector.
1035 emit_mask_scatter(struct lp_build_tgsi_soa_context
*bld
,
1036 LLVMValueRef base_ptr
,
1037 LLVMValueRef indexes
,
1038 LLVMValueRef values
,
1039 struct lp_exec_mask
*mask
)
1041 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1042 LLVMBuilderRef builder
= gallivm
->builder
;
1044 LLVMValueRef pred
= mask
->has_mask
? mask
->exec_mask
: NULL
;
1047 * Loop over elements of index_vec, store scalar value.
1049 for (i
= 0; i
< bld
->bld_base
.base
.type
.length
; i
++) {
1050 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1051 LLVMValueRef index
= LLVMBuildExtractElement(builder
, indexes
, ii
, "");
1052 LLVMValueRef scalar_ptr
= LLVMBuildGEP(builder
, base_ptr
, &index
, 1, "scatter_ptr");
1053 LLVMValueRef val
= LLVMBuildExtractElement(builder
, values
, ii
, "scatter_val");
1054 LLVMValueRef scalar_pred
= pred
?
1055 LLVMBuildExtractElement(builder
, pred
, ii
, "scatter_pred") : NULL
;
1058 lp_build_printf(gallivm
, "scatter %d: val %f at %d %p\n",
1059 ii
, val
, index
, scalar_ptr
);
1062 LLVMValueRef real_val
, dst_val
;
1063 dst_val
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1064 real_val
= lp_build_select(&bld
->elem_bld
, scalar_pred
, val
, dst_val
);
1065 LLVMBuildStore(builder
, real_val
, scalar_ptr
);
1068 LLVMBuildStore(builder
, val
, scalar_ptr
);
1075 * Read the current value of the ADDR register, convert the floats to
1076 * ints, add the base index and return the vector of offsets.
1077 * The offsets will be used to index into the constant buffer or
1078 * temporary register file.
1081 get_indirect_index(struct lp_build_tgsi_soa_context
*bld
,
1082 unsigned reg_file
, unsigned reg_index
,
1083 const struct tgsi_ind_register
*indirect_reg
,
1086 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
1087 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
1088 /* always use X component of address register */
1089 unsigned swizzle
= indirect_reg
->Swizzle
;
1092 LLVMValueRef max_index
;
1095 assert(bld
->indirect_files
& (1 << reg_file
));
1097 base
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
, uint_bld
->type
, reg_index
);
1099 assert(swizzle
< 4);
1100 switch (indirect_reg
->File
) {
1101 case TGSI_FILE_ADDRESS
:
1102 rel
= LLVMBuildLoad(builder
,
1103 bld
->addr
[indirect_reg
->Index
][swizzle
],
1105 /* ADDR LLVM values already have LLVM integer type. */
1107 case TGSI_FILE_TEMPORARY
:
1108 rel
= lp_get_temp_ptr_soa(bld
, indirect_reg
->Index
, swizzle
);
1109 rel
= LLVMBuildLoad(builder
, rel
, "load temp reg");
1110 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1111 * value actually stored is expected to be an integer */
1112 rel
= LLVMBuildBitCast(builder
, rel
, uint_bld
->vec_type
, "");
1116 rel
= uint_bld
->zero
;
1119 index
= lp_build_add(uint_bld
, base
, rel
);
1122 * emit_fetch_constant handles constant buffer overflow so this code
1123 * is pointless for them.
1124 * Furthermore the D3D10 spec in section 6.5 says:
1125 * If the constant buffer bound to a slot is larger than the size
1126 * declared in the shader for that slot, implementations are allowed
1127 * to return incorrect data (not necessarily 0) for indices that are
1128 * larger than the declared size but smaller than the buffer size.
1130 if (reg_file
!= TGSI_FILE_CONSTANT
) {
1131 assert(index_limit
>= 0);
1132 max_index
= lp_build_const_int_vec(bld
->bld_base
.base
.gallivm
,
1133 uint_bld
->type
, index_limit
);
1135 assert(!uint_bld
->type
.sign
);
1136 index
= lp_build_min(uint_bld
, index
, max_index
);
1142 static struct lp_build_context
*
1143 stype_to_fetch(struct lp_build_tgsi_context
* bld_base
,
1144 enum tgsi_opcode_type stype
)
1146 struct lp_build_context
*bld_fetch
;
1149 case TGSI_TYPE_FLOAT
:
1150 case TGSI_TYPE_UNTYPED
:
1151 bld_fetch
= &bld_base
->base
;
1153 case TGSI_TYPE_UNSIGNED
:
1154 bld_fetch
= &bld_base
->uint_bld
;
1156 case TGSI_TYPE_SIGNED
:
1157 bld_fetch
= &bld_base
->int_bld
;
1159 case TGSI_TYPE_DOUBLE
:
1160 bld_fetch
= &bld_base
->dbl_bld
;
1162 case TGSI_TYPE_UNSIGNED64
:
1163 bld_fetch
= &bld_base
->uint64_bld
;
1165 case TGSI_TYPE_SIGNED64
:
1166 bld_fetch
= &bld_base
->int64_bld
;
1168 case TGSI_TYPE_VOID
:
1178 get_soa_array_offsets(struct lp_build_context
*uint_bld
,
1179 LLVMValueRef indirect_index
,
1180 unsigned chan_index
,
1181 boolean need_perelement_offset
)
1183 struct gallivm_state
*gallivm
= uint_bld
->gallivm
;
1184 LLVMValueRef chan_vec
=
1185 lp_build_const_int_vec(uint_bld
->gallivm
, uint_bld
->type
, chan_index
);
1186 LLVMValueRef length_vec
=
1187 lp_build_const_int_vec(gallivm
, uint_bld
->type
, uint_bld
->type
.length
);
1188 LLVMValueRef index_vec
;
1190 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1191 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1192 index_vec
= lp_build_add(uint_bld
, index_vec
, chan_vec
);
1193 index_vec
= lp_build_mul(uint_bld
, index_vec
, length_vec
);
1195 if (need_perelement_offset
) {
1196 LLVMValueRef pixel_offsets
;
1198 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1199 pixel_offsets
= uint_bld
->undef
;
1200 for (i
= 0; i
< uint_bld
->type
.length
; i
++) {
1201 LLVMValueRef ii
= lp_build_const_int32(gallivm
, i
);
1202 pixel_offsets
= LLVMBuildInsertElement(gallivm
->builder
, pixel_offsets
,
1205 index_vec
= lp_build_add(uint_bld
, index_vec
, pixel_offsets
);
1211 emit_fetch_constant(
1212 struct lp_build_tgsi_context
* bld_base
,
1213 const struct tgsi_full_src_register
* reg
,
1214 enum tgsi_opcode_type stype
,
1215 unsigned swizzle_in
)
1217 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1218 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1219 LLVMBuilderRef builder
= gallivm
->builder
;
1220 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
1221 unsigned dimension
= 0;
1222 LLVMValueRef consts_ptr
;
1223 LLVMValueRef num_consts
;
1225 unsigned swizzle
= swizzle_in
& 0xffff;
1227 /* XXX: Handle fetching xyzw components as a vector */
1228 assert(swizzle
!= ~0u);
1230 if (reg
->Register
.Dimension
) {
1231 assert(!reg
->Dimension
.Indirect
);
1232 dimension
= reg
->Dimension
.Index
;
1233 assert(dimension
< LP_MAX_TGSI_CONST_BUFFERS
);
1236 consts_ptr
= bld
->consts
[dimension
];
1237 num_consts
= bld
->consts_sizes
[dimension
];
1239 if (reg
->Register
.Indirect
) {
1240 LLVMValueRef indirect_index
;
1241 LLVMValueRef swizzle_vec
=
1242 lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle
);
1243 LLVMValueRef index_vec
; /* index into the const buffer */
1244 LLVMValueRef overflow_mask
;
1245 LLVMValueRef index_vec2
= NULL
;
1247 indirect_index
= get_indirect_index(bld
,
1249 reg
->Register
.Index
,
1251 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1253 /* All fetches are from the same constant buffer, so
1254 * we need to propagate the size to a vector to do a
1255 * vector comparison */
1256 num_consts
= lp_build_broadcast_scalar(uint_bld
, num_consts
);
1257 /* Construct a boolean vector telling us which channels
1258 * overflow the bound constant buffer */
1259 overflow_mask
= lp_build_compare(gallivm
, uint_bld
->type
, PIPE_FUNC_GEQUAL
,
1260 indirect_index
, num_consts
);
1262 /* index_vec = indirect_index * 4 + swizzle */
1263 index_vec
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1264 index_vec
= lp_build_add(uint_bld
, index_vec
, swizzle_vec
);
1266 if (tgsi_type_is_64bit(stype
)) {
1267 LLVMValueRef swizzle_vec2
;
1268 swizzle_vec2
= lp_build_const_int_vec(gallivm
, uint_bld
->type
, swizzle_in
>> 16);
1269 index_vec2
= lp_build_shl_imm(uint_bld
, indirect_index
, 2);
1270 index_vec2
= lp_build_add(uint_bld
, index_vec2
, swizzle_vec2
);
1272 /* Gather values from the constant buffer */
1273 res
= build_gather(bld_base
, consts_ptr
, index_vec
, overflow_mask
, index_vec2
);
1276 LLVMValueRef index
; /* index into the const buffer */
1277 LLVMValueRef scalar
, scalar_ptr
;
1278 struct lp_build_context
*bld_broad
= &bld_base
->base
;
1279 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1281 scalar_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1284 if (tgsi_type_is_64bit(stype
) && ((swizzle_in
>> 16) != swizzle
+ 1)) {
1286 LLVMValueRef scalar2
, scalar2_ptr
;
1287 LLVMValueRef shuffles
[2];
1288 index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1290 scalar2_ptr
= LLVMBuildGEP(builder
, consts_ptr
,
1293 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1294 scalar2
= LLVMBuildLoad(builder
, scalar2_ptr
, "");
1295 shuffles
[0] = lp_build_const_int32(gallivm
, 0);
1296 shuffles
[1] = lp_build_const_int32(gallivm
, 1);
1298 res
= LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2));
1299 res
= LLVMBuildInsertElement(builder
, res
, scalar
, shuffles
[0], "");
1300 res
= LLVMBuildInsertElement(builder
, res
, scalar2
, shuffles
[1], "");
1302 if (stype
== TGSI_TYPE_DOUBLE
) {
1303 LLVMTypeRef dptr_type
= LLVMPointerType(LLVMDoubleTypeInContext(gallivm
->context
), 0);
1304 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, dptr_type
, "");
1305 bld_broad
= &bld_base
->dbl_bld
;
1306 } else if (stype
== TGSI_TYPE_UNSIGNED64
) {
1307 LLVMTypeRef u64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1308 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, u64ptr_type
, "");
1309 bld_broad
= &bld_base
->uint64_bld
;
1310 } else if (stype
== TGSI_TYPE_SIGNED64
) {
1311 LLVMTypeRef i64ptr_type
= LLVMPointerType(LLVMInt64TypeInContext(gallivm
->context
), 0);
1312 scalar_ptr
= LLVMBuildBitCast(builder
, scalar_ptr
, i64ptr_type
, "");
1313 bld_broad
= &bld_base
->int64_bld
;
1315 scalar
= LLVMBuildLoad(builder
, scalar_ptr
, "");
1316 res
= lp_build_broadcast_scalar(bld_broad
, scalar
);
1321 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| stype
== TGSI_TYPE_DOUBLE
|| stype
== TGSI_TYPE_SIGNED64
|| stype
== TGSI_TYPE_UNSIGNED64
) {
1322 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1323 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1330 * Fetch 64-bit values from two separate channels.
1331 * 64-bit values are stored split across two channels, like xy and zw.
1332 * This function creates a set of vec_length*2 floats,
1333 * extracts the values from the two channels,
1334 * puts them in the correct place, then casts to vec_length 64-bits.
1338 struct lp_build_tgsi_context
* bld_base
,
1339 enum tgsi_opcode_type stype
,
1341 LLVMValueRef input2
)
1343 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1344 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1345 LLVMBuilderRef builder
= gallivm
->builder
;
1347 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1349 LLVMValueRef shuffles
[2 * (LP_MAX_VECTOR_WIDTH
/32)];
1350 int len
= bld_base
->base
.type
.length
* 2;
1351 assert(len
<= (2 * (LP_MAX_VECTOR_WIDTH
/32)));
1353 for (i
= 0; i
< bld_base
->base
.type
.length
* 2; i
+=2) {
1354 shuffles
[i
] = lp_build_const_int32(gallivm
, i
/ 2);
1355 shuffles
[i
+ 1] = lp_build_const_int32(gallivm
, i
/ 2 + bld_base
->base
.type
.length
);
1357 res
= LLVMBuildShuffleVector(builder
, input
, input2
, LLVMConstVector(shuffles
, len
), "");
1359 return LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1363 emit_fetch_immediate(
1364 struct lp_build_tgsi_context
* bld_base
,
1365 const struct tgsi_full_src_register
* reg
,
1366 enum tgsi_opcode_type stype
,
1367 unsigned swizzle_in
)
1369 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1370 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1371 LLVMBuilderRef builder
= gallivm
->builder
;
1372 LLVMValueRef res
= NULL
;
1373 unsigned swizzle
= swizzle_in
& 0xffff;
1375 if (bld
->use_immediates_array
|| reg
->Register
.Indirect
) {
1376 LLVMValueRef imms_array
;
1377 LLVMTypeRef fptr_type
;
1379 /* cast imms_array pointer to float* */
1380 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1381 imms_array
= LLVMBuildBitCast(builder
, bld
->imms_array
, fptr_type
, "");
1383 if (reg
->Register
.Indirect
) {
1384 LLVMValueRef indirect_index
;
1385 LLVMValueRef index_vec
; /* index into the immediate register array */
1386 LLVMValueRef index_vec2
= NULL
;
1387 indirect_index
= get_indirect_index(bld
,
1389 reg
->Register
.Index
,
1391 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1393 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1394 * immediates are stored as full vectors (FIXME??? - might be better
1395 * to store them the same as constants) but all elements are the same
1398 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1402 if (tgsi_type_is_64bit(stype
))
1403 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1407 /* Gather values from the immediate register array */
1408 res
= build_gather(bld_base
, imms_array
, index_vec
, NULL
, index_vec2
);
1410 LLVMValueRef gep
[2];
1411 gep
[0] = lp_build_const_int32(gallivm
, 0);
1412 gep
[1] = lp_build_const_int32(gallivm
, reg
->Register
.Index
* 4 + swizzle
);
1413 LLVMValueRef imms_ptr
= LLVMBuildGEP(builder
,
1414 bld
->imms_array
, gep
, 2, "");
1415 res
= LLVMBuildLoad(builder
, imms_ptr
, "");
1417 if (tgsi_type_is_64bit(stype
)) {
1418 LLVMValueRef imms_ptr2
;
1420 gep
[1] = lp_build_const_int32(gallivm
,
1421 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1422 imms_ptr2
= LLVMBuildGEP(builder
,
1423 bld
->imms_array
, gep
, 2, "");
1424 res2
= LLVMBuildLoad(builder
, imms_ptr2
, "");
1425 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1430 res
= bld
->immediates
[reg
->Register
.Index
][swizzle
];
1431 if (tgsi_type_is_64bit(stype
))
1432 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->immediates
[reg
->Register
.Index
][swizzle_in
>> 16]);
1435 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1436 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1437 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1444 struct lp_build_tgsi_context
* bld_base
,
1445 const struct tgsi_full_src_register
* reg
,
1446 enum tgsi_opcode_type stype
,
1447 unsigned swizzle_in
)
1449 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1450 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1451 LLVMBuilderRef builder
= gallivm
->builder
;
1453 unsigned swizzle
= swizzle_in
& 0xffff;
1455 if (reg
->Register
.Indirect
) {
1456 LLVMValueRef indirect_index
;
1457 LLVMValueRef index_vec
; /* index into the input reg array */
1458 LLVMValueRef index_vec2
= NULL
;
1459 LLVMValueRef inputs_array
;
1460 LLVMTypeRef fptr_type
;
1462 indirect_index
= get_indirect_index(bld
,
1464 reg
->Register
.Index
,
1466 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1468 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1472 if (tgsi_type_is_64bit(stype
)) {
1473 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1478 /* cast inputs_array pointer to float* */
1479 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1480 inputs_array
= LLVMBuildBitCast(builder
, bld
->inputs_array
, fptr_type
, "");
1482 /* Gather values from the input register array */
1483 res
= build_gather(bld_base
, inputs_array
, index_vec
, NULL
, index_vec2
);
1485 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
)) {
1486 LLVMValueRef lindex
= lp_build_const_int32(gallivm
,
1487 reg
->Register
.Index
* 4 + swizzle
);
1488 LLVMValueRef input_ptr
= LLVMBuildGEP(builder
,
1489 bld
->inputs_array
, &lindex
, 1, "");
1491 res
= LLVMBuildLoad(builder
, input_ptr
, "");
1492 if (tgsi_type_is_64bit(stype
)) {
1493 LLVMValueRef lindex1
;
1494 LLVMValueRef input_ptr2
;
1497 lindex1
= lp_build_const_int32(gallivm
,
1498 reg
->Register
.Index
* 4 + (swizzle_in
>> 16));
1499 input_ptr2
= LLVMBuildGEP(builder
,
1500 bld
->inputs_array
, &lindex1
, 1, "");
1501 res2
= LLVMBuildLoad(builder
, input_ptr2
, "");
1502 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1506 res
= bld
->inputs
[reg
->Register
.Index
][swizzle
];
1507 if (tgsi_type_is_64bit(stype
))
1508 res
= emit_fetch_64bit(bld_base
, stype
, res
, bld
->inputs
[reg
->Register
.Index
][swizzle_in
>> 16]);
1514 if (stype
== TGSI_TYPE_SIGNED
|| stype
== TGSI_TYPE_UNSIGNED
|| tgsi_type_is_64bit(stype
)) {
1515 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1516 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1524 emit_fetch_gs_input(
1525 struct lp_build_tgsi_context
* bld_base
,
1526 const struct tgsi_full_src_register
* reg
,
1527 enum tgsi_opcode_type stype
,
1528 unsigned swizzle_in
)
1530 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1531 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1532 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1533 LLVMBuilderRef builder
= gallivm
->builder
;
1534 LLVMValueRef attrib_index
= NULL
;
1535 LLVMValueRef vertex_index
= NULL
;
1536 unsigned swizzle
= swizzle_in
& 0xffff;
1537 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle
);
1540 if (info
->input_semantic_name
[reg
->Register
.Index
] == TGSI_SEMANTIC_PRIMID
) {
1541 /* This is really a system value not a regular input */
1542 assert(!reg
->Register
.Indirect
);
1543 assert(!reg
->Dimension
.Indirect
);
1544 res
= bld
->system_values
.prim_id
;
1545 if (stype
!= TGSI_TYPE_UNSIGNED
&& stype
!= TGSI_TYPE_SIGNED
) {
1546 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1551 if (reg
->Register
.Indirect
) {
1553 * XXX: this is possibly not quite the right value, since file_max may be
1554 * larger than the max attrib index, due to it being the max of declared
1555 * inputs AND the max vertices per prim (which is 6 for tri adj).
1556 * It should however be safe to use (since we always allocate
1557 * PIPE_MAX_SHADER_INPUTS (80) for it, which is overallocated quite a bit).
1559 int index_limit
= info
->file_max
[reg
->Register
.File
];
1560 attrib_index
= get_indirect_index(bld
,
1562 reg
->Register
.Index
,
1566 attrib_index
= lp_build_const_int32(gallivm
, reg
->Register
.Index
);
1569 if (reg
->Dimension
.Indirect
) {
1571 * A fixed 6 should do as well (which is what we allocate).
1573 int index_limit
= u_vertices_per_prim(info
->properties
[TGSI_PROPERTY_GS_INPUT_PRIM
]);
1574 vertex_index
= get_indirect_index(bld
,
1576 reg
->Dimension
.Index
,
1580 vertex_index
= lp_build_const_int32(gallivm
, reg
->Dimension
.Index
);
1583 res
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1584 reg
->Dimension
.Indirect
,
1586 reg
->Register
.Indirect
,
1591 if (tgsi_type_is_64bit(stype
)) {
1592 LLVMValueRef swizzle_index
= lp_build_const_int32(gallivm
, swizzle_in
>> 16);
1594 res2
= bld
->gs_iface
->fetch_input(bld
->gs_iface
, bld_base
,
1595 reg
->Dimension
.Indirect
,
1597 reg
->Register
.Indirect
,
1601 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1602 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1603 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1604 } else if (stype
== TGSI_TYPE_SIGNED
) {
1605 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1612 emit_fetch_temporary(
1613 struct lp_build_tgsi_context
* bld_base
,
1614 const struct tgsi_full_src_register
* reg
,
1615 enum tgsi_opcode_type stype
,
1616 unsigned swizzle_in
)
1618 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1619 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1620 LLVMBuilderRef builder
= gallivm
->builder
;
1622 unsigned swizzle
= swizzle_in
& 0xffff;
1624 if (reg
->Register
.Indirect
) {
1625 LLVMValueRef indirect_index
;
1626 LLVMValueRef index_vec
, index_vec2
= NULL
; /* index into the temp reg array */
1627 LLVMValueRef temps_array
;
1628 LLVMTypeRef fptr_type
;
1630 indirect_index
= get_indirect_index(bld
,
1632 reg
->Register
.Index
,
1634 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1636 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1640 if (tgsi_type_is_64bit(stype
)) {
1641 index_vec2
= get_soa_array_offsets(&bld_base
->uint_bld
,
1647 /* cast temps_array pointer to float* */
1648 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1649 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1651 /* Gather values from the temporary register array */
1652 res
= build_gather(bld_base
, temps_array
, index_vec
, NULL
, index_vec2
);
1655 LLVMValueRef temp_ptr
;
1656 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle
);
1657 res
= LLVMBuildLoad(builder
, temp_ptr
, "");
1659 if (tgsi_type_is_64bit(stype
)) {
1660 LLVMValueRef temp_ptr2
, res2
;
1662 temp_ptr2
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, swizzle_in
>> 16);
1663 res2
= LLVMBuildLoad(builder
, temp_ptr2
, "");
1664 res
= emit_fetch_64bit(bld_base
, stype
, res
, res2
);
1668 if (stype
== TGSI_TYPE_SIGNED
||
1669 stype
== TGSI_TYPE_UNSIGNED
||
1670 stype
== TGSI_TYPE_DOUBLE
||
1671 stype
== TGSI_TYPE_SIGNED64
||
1672 stype
== TGSI_TYPE_UNSIGNED64
) {
1673 struct lp_build_context
*bld_fetch
= stype_to_fetch(bld_base
, stype
);
1674 res
= LLVMBuildBitCast(builder
, res
, bld_fetch
->vec_type
, "");
1681 emit_fetch_system_value(
1682 struct lp_build_tgsi_context
* bld_base
,
1683 const struct tgsi_full_src_register
* reg
,
1684 enum tgsi_opcode_type stype
,
1685 unsigned swizzle_in
)
1687 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1688 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
1689 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
1690 LLVMBuilderRef builder
= gallivm
->builder
;
1692 enum tgsi_opcode_type atype
; // Actual type of the value
1694 assert(!reg
->Register
.Indirect
);
1696 switch (info
->system_value_semantic_name
[reg
->Register
.Index
]) {
1697 case TGSI_SEMANTIC_INSTANCEID
:
1698 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.instance_id
);
1699 atype
= TGSI_TYPE_UNSIGNED
;
1702 case TGSI_SEMANTIC_VERTEXID
:
1703 res
= bld
->system_values
.vertex_id
;
1704 atype
= TGSI_TYPE_UNSIGNED
;
1707 case TGSI_SEMANTIC_VERTEXID_NOBASE
:
1708 res
= bld
->system_values
.vertex_id_nobase
;
1709 atype
= TGSI_TYPE_UNSIGNED
;
1712 case TGSI_SEMANTIC_BASEVERTEX
:
1713 res
= bld
->system_values
.basevertex
;
1714 atype
= TGSI_TYPE_UNSIGNED
;
1717 case TGSI_SEMANTIC_PRIMID
:
1718 res
= bld
->system_values
.prim_id
;
1719 atype
= TGSI_TYPE_UNSIGNED
;
1722 case TGSI_SEMANTIC_INVOCATIONID
:
1723 res
= lp_build_broadcast_scalar(&bld_base
->uint_bld
, bld
->system_values
.invocation_id
);
1724 atype
= TGSI_TYPE_UNSIGNED
;
1727 case TGSI_SEMANTIC_HELPER_INVOCATION
:
1728 res
= LLVMBuildNot(gallivm
->builder
, lp_build_mask_value(bld
->mask
), "");
1729 atype
= TGSI_TYPE_UNSIGNED
;
1733 assert(!"unexpected semantic in emit_fetch_system_value");
1734 res
= bld_base
->base
.zero
;
1735 atype
= TGSI_TYPE_FLOAT
;
1739 if (atype
!= stype
) {
1740 if (stype
== TGSI_TYPE_FLOAT
) {
1741 res
= LLVMBuildBitCast(builder
, res
, bld_base
->base
.vec_type
, "");
1742 } else if (stype
== TGSI_TYPE_UNSIGNED
) {
1743 res
= LLVMBuildBitCast(builder
, res
, bld_base
->uint_bld
.vec_type
, "");
1744 } else if (stype
== TGSI_TYPE_SIGNED
) {
1745 res
= LLVMBuildBitCast(builder
, res
, bld_base
->int_bld
.vec_type
, "");
1753 * Register fetch with derivatives.
1757 struct lp_build_tgsi_soa_context
*bld
,
1766 /* TODO: use interpolation coeffs for inputs */
1769 *ddx
= lp_build_ddx(&bld
->bld_base
.base
, src
);
1772 *ddy
= lp_build_ddy(&bld
->bld_base
.base
, src
);
1776 * store an array of vec-length 64-bit into two arrays of vec_length floats
1778 * value is d0, d1, d2, d3 etc.
1779 * each 64-bit has high and low pieces x, y
1780 * so gets stored into the separate channels as:
1781 * chan_ptr = d0.x, d1.x, d2.x, d3.x
1782 * chan_ptr2 = d0.y, d1.y, d2.y, d3.y
1785 emit_store_64bit_chan(struct lp_build_tgsi_context
*bld_base
,
1786 LLVMValueRef chan_ptr
, LLVMValueRef chan_ptr2
,
1789 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1790 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1791 LLVMBuilderRef builder
= gallivm
->builder
;
1792 struct lp_build_context
*float_bld
= &bld_base
->base
;
1794 LLVMValueRef temp
, temp2
;
1795 LLVMValueRef shuffles
[LP_MAX_VECTOR_WIDTH
/32];
1796 LLVMValueRef shuffles2
[LP_MAX_VECTOR_WIDTH
/32];
1798 for (i
= 0; i
< bld_base
->base
.type
.length
; i
++) {
1799 shuffles
[i
] = lp_build_const_int32(gallivm
, i
* 2);
1800 shuffles2
[i
] = lp_build_const_int32(gallivm
, (i
* 2) + 1);
1803 temp
= LLVMBuildShuffleVector(builder
, value
,
1804 LLVMGetUndef(LLVMTypeOf(value
)),
1805 LLVMConstVector(shuffles
,
1806 bld_base
->base
.type
.length
),
1808 temp2
= LLVMBuildShuffleVector(builder
, value
,
1809 LLVMGetUndef(LLVMTypeOf(value
)),
1810 LLVMConstVector(shuffles2
,
1811 bld_base
->base
.type
.length
),
1814 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp
, chan_ptr
);
1815 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, temp2
, chan_ptr2
);
1823 struct lp_build_tgsi_context
*bld_base
,
1824 const struct tgsi_full_instruction
*inst
,
1826 unsigned chan_index
,
1829 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1830 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1831 LLVMBuilderRef builder
= gallivm
->builder
;
1832 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[index
];
1833 struct lp_build_context
*float_bld
= &bld_base
->base
;
1834 struct lp_build_context
*int_bld
= &bld_base
->int_bld
;
1835 LLVMValueRef indirect_index
= NULL
;
1836 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
1841 * It is always assumed to be float.
1843 if (inst
->Instruction
.Saturate
) {
1844 assert(dtype
== TGSI_TYPE_FLOAT
||
1845 dtype
== TGSI_TYPE_UNTYPED
);
1846 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1847 value
= lp_build_clamp_zero_one_nanzero(float_bld
, value
);
1850 if (reg
->Register
.Indirect
) {
1852 * Currently the mesa/st doesn't generate indirect stores
1853 * to 64-bit values, it normally uses MOV to do indirect stores.
1855 assert(!tgsi_type_is_64bit(dtype
));
1856 indirect_index
= get_indirect_index(bld
,
1858 reg
->Register
.Index
,
1860 bld
->bld_base
.info
->file_max
[reg
->Register
.File
]);
1862 assert(reg
->Register
.Index
<=
1863 bld_base
->info
->file_max
[reg
->Register
.File
]);
1866 if (DEBUG_EXECUTION
) {
1867 emit_dump_reg(gallivm
, reg
->Register
.File
, reg
->Register
.Index
, chan_index
, value
);
1870 switch( reg
->Register
.File
) {
1871 case TGSI_FILE_OUTPUT
:
1872 /* Outputs are always stored as floats */
1873 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1875 if (reg
->Register
.Indirect
) {
1876 LLVMValueRef index_vec
; /* indexes into the output registers */
1877 LLVMValueRef outputs_array
;
1878 LLVMTypeRef fptr_type
;
1880 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1885 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1886 outputs_array
= LLVMBuildBitCast(builder
, bld
->outputs_array
, fptr_type
, "");
1888 /* Scatter store values into output registers */
1889 emit_mask_scatter(bld
, outputs_array
, index_vec
, value
,
1893 LLVMValueRef out_ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1896 if (tgsi_type_is_64bit(dtype
)) {
1897 LLVMValueRef out_ptr2
= lp_get_output_ptr(bld
, reg
->Register
.Index
,
1899 emit_store_64bit_chan(bld_base
, out_ptr
, out_ptr2
,
1902 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, out_ptr
);
1906 case TGSI_FILE_TEMPORARY
:
1907 /* Temporaries are always stored as floats */
1908 if (!tgsi_type_is_64bit(dtype
))
1909 value
= LLVMBuildBitCast(builder
, value
, float_bld
->vec_type
, "");
1911 value
= LLVMBuildBitCast(builder
, value
, LLVMVectorType(LLVMFloatTypeInContext(gallivm
->context
), bld_base
->base
.type
.length
* 2), "");
1913 if (reg
->Register
.Indirect
) {
1914 LLVMValueRef index_vec
; /* indexes into the temp registers */
1915 LLVMValueRef temps_array
;
1916 LLVMTypeRef fptr_type
;
1918 index_vec
= get_soa_array_offsets(&bld_base
->uint_bld
,
1923 fptr_type
= LLVMPointerType(LLVMFloatTypeInContext(gallivm
->context
), 0);
1924 temps_array
= LLVMBuildBitCast(builder
, bld
->temps_array
, fptr_type
, "");
1926 /* Scatter store values into temp registers */
1927 emit_mask_scatter(bld
, temps_array
, index_vec
, value
,
1931 LLVMValueRef temp_ptr
;
1932 temp_ptr
= lp_get_temp_ptr_soa(bld
, reg
->Register
.Index
, chan_index
);
1934 if (tgsi_type_is_64bit(dtype
)) {
1935 LLVMValueRef temp_ptr2
= lp_get_temp_ptr_soa(bld
,
1936 reg
->Register
.Index
,
1938 emit_store_64bit_chan(bld_base
, temp_ptr
, temp_ptr2
,
1942 lp_exec_mask_store(&bld
->exec_mask
, float_bld
, value
, temp_ptr
);
1946 case TGSI_FILE_ADDRESS
:
1947 assert(dtype
== TGSI_TYPE_SIGNED
);
1948 assert(LLVMTypeOf(value
) == int_bld
->vec_type
);
1949 value
= LLVMBuildBitCast(builder
, value
, int_bld
->vec_type
, "");
1950 lp_exec_mask_store(&bld
->exec_mask
, int_bld
, value
,
1951 bld
->addr
[reg
->Register
.Index
][chan_index
]);
1962 * Called at the beginning of the translation of each TGSI instruction, to
1963 * emit some debug code.
1967 struct lp_build_tgsi_context
* bld_base
,
1968 const struct tgsi_full_instruction
* inst
,
1969 const struct tgsi_opcode_info
* info
)
1972 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
1974 if (DEBUG_EXECUTION
) {
1976 * Dump the TGSI instruction.
1979 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1983 tgsi_dump_instruction_str(inst
, bld_base
->pc
, &buf
[2], sizeof buf
- 2);
1984 lp_build_printf(gallivm
, buf
);
1986 /* Dump the execution mask.
1988 if (bld
->exec_mask
.has_mask
) {
1989 lp_build_print_value(gallivm
, " mask = ", bld
->exec_mask
.exec_mask
);
1996 struct lp_build_tgsi_context
* bld_base
,
1997 const struct tgsi_full_instruction
* inst
,
1998 const struct tgsi_opcode_info
* info
,
2000 LLVMValueRef dst
[4])
2003 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
, index
);
2005 unsigned writemask
= inst
->Dst
[index
].Register
.WriteMask
;
2007 unsigned chan_index
= u_bit_scan(&writemask
);
2008 if (tgsi_type_is_64bit(dtype
) && (chan_index
== 1 || chan_index
== 3))
2010 emit_store_chan(bld_base
, inst
, index
, chan_index
, dst
[chan_index
]);
2015 tgsi_to_pipe_tex_target(unsigned tgsi_target
)
2017 switch (tgsi_target
) {
2018 case TGSI_TEXTURE_BUFFER
:
2020 case TGSI_TEXTURE_1D
:
2021 case TGSI_TEXTURE_SHADOW1D
:
2022 return PIPE_TEXTURE_1D
;
2023 case TGSI_TEXTURE_2D
:
2024 case TGSI_TEXTURE_SHADOW2D
:
2025 case TGSI_TEXTURE_2D_MSAA
:
2026 return PIPE_TEXTURE_2D
;
2027 case TGSI_TEXTURE_3D
:
2028 return PIPE_TEXTURE_3D
;
2029 case TGSI_TEXTURE_CUBE
:
2030 case TGSI_TEXTURE_SHADOWCUBE
:
2031 return PIPE_TEXTURE_CUBE
;
2032 case TGSI_TEXTURE_RECT
:
2033 case TGSI_TEXTURE_SHADOWRECT
:
2034 return PIPE_TEXTURE_RECT
;
2035 case TGSI_TEXTURE_1D_ARRAY
:
2036 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2037 return PIPE_TEXTURE_1D_ARRAY
;
2038 case TGSI_TEXTURE_2D_ARRAY
:
2039 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2040 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2041 return PIPE_TEXTURE_2D_ARRAY
;
2042 case TGSI_TEXTURE_CUBE_ARRAY
:
2043 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2044 return PIPE_TEXTURE_CUBE_ARRAY
;
2052 static enum lp_sampler_lod_property
2053 lp_build_lod_property(
2054 struct lp_build_tgsi_context
*bld_base
,
2055 const struct tgsi_full_instruction
*inst
,
2058 const struct tgsi_full_src_register
*reg
= &inst
->Src
[src_op
];
2059 enum lp_sampler_lod_property lod_property
;
2062 * Not much we can do here. We could try catching inputs declared
2063 * with constant interpolation but not sure it's worth it - since for
2064 * TEX opcodes as well as FETCH/LD the lod comes from same reg as
2065 * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO), just
2066 * like the constant/immediate recognition below.
2067 * What seems to be of more value would be to recognize temps holding
2068 * broadcasted scalars but no way we can do it.
2069 * Tried asking llvm but without any success (using LLVMIsConstant
2070 * even though this isn't exactly what we'd need), even as simple as
2071 * IMM[0] UINT32 (0,-1,0,0)
2072 * MOV TEMP[0] IMM[0].yyyy
2073 * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
2075 * This means there's ZERO chance this will ever catch a scalar lod
2076 * with traditional tex opcodes as well as texel fetches, since the lod
2077 * comes from the same reg as coords (except some test shaders using
2078 * constant coords maybe).
2079 * There's at least hope for sample opcodes as well as size queries.
2081 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
||
2082 reg
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
2083 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2085 else if (bld_base
->info
->processor
== PIPE_SHADER_FRAGMENT
) {
2086 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2087 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2090 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2094 /* never use scalar (per-quad) lod the results are just too wrong. */
2095 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2097 return lod_property
;
2102 * High-level instruction translators.
2106 emit_tex( struct lp_build_tgsi_soa_context
*bld
,
2107 const struct tgsi_full_instruction
*inst
,
2108 enum lp_build_tex_modifier modifier
,
2109 LLVMValueRef
*texel
,
2110 unsigned sampler_reg
,
2111 enum lp_sampler_op_type sampler_op
)
2113 unsigned unit
= inst
->Src
[sampler_reg
].Register
.Index
;
2114 LLVMValueRef oow
= NULL
;
2115 LLVMValueRef lod
= NULL
;
2116 LLVMValueRef coords
[5];
2117 LLVMValueRef offsets
[3] = { NULL
};
2118 struct lp_derivatives derivs
;
2119 struct lp_sampler_params params
;
2120 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2121 unsigned num_derivs
, num_offsets
, i
;
2122 unsigned shadow_coord
= 0;
2123 unsigned layer_coord
= 0;
2124 unsigned sample_key
= sampler_op
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2126 memset(¶ms
, 0, sizeof(params
));
2128 if (!bld
->sampler
) {
2129 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2130 for (i
= 0; i
< 4; i
++) {
2131 texel
[i
] = bld
->bld_base
.base
.undef
;
2136 switch (inst
->Texture
.Texture
) {
2137 case TGSI_TEXTURE_1D_ARRAY
:
2140 case TGSI_TEXTURE_1D
:
2144 case TGSI_TEXTURE_2D_ARRAY
:
2147 case TGSI_TEXTURE_2D
:
2148 case TGSI_TEXTURE_RECT
:
2152 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
2155 case TGSI_TEXTURE_SHADOW1D
:
2160 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
2166 case TGSI_TEXTURE_SHADOW2D
:
2167 case TGSI_TEXTURE_SHADOWRECT
:
2172 case TGSI_TEXTURE_CUBE
:
2176 case TGSI_TEXTURE_3D
:
2180 case TGSI_TEXTURE_SHADOWCUBE
:
2185 case TGSI_TEXTURE_CUBE_ARRAY
:
2190 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
2194 shadow_coord
= 4; /* shadow coord special different reg */
2196 case TGSI_TEXTURE_2D_MSAA
:
2197 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2203 /* Note lod and especially projected are illegal in a LOT of cases */
2204 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2205 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2206 if (inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
2207 inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE_ARRAY
) {
2208 /* note that shadow cube array with bias/explicit lod does not exist */
2209 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2212 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2214 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2215 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2217 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2218 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2220 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2223 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
) {
2224 oow
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2225 oow
= lp_build_rcp(&bld
->bld_base
.base
, oow
);
2228 for (i
= 0; i
< num_derivs
; i
++) {
2229 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2230 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2231 coords
[i
] = lp_build_mul(&bld
->bld_base
.base
, coords
[i
], oow
);
2233 for (i
= num_derivs
; i
< 5; i
++) {
2234 coords
[i
] = bld
->bld_base
.base
.undef
;
2237 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2239 if (layer_coord
== 3) {
2240 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2243 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2245 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2246 coords
[2] = lp_build_mul(&bld
->bld_base
.base
, coords
[2], oow
);
2248 /* Shadow coord occupies always 5th slot. */
2250 sample_key
|= LP_SAMPLER_SHADOW
;
2251 if (shadow_coord
== 4) {
2252 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, 0);
2255 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, shadow_coord
);
2257 if (modifier
== LP_BLD_TEX_MODIFIER_PROJECTED
)
2258 coords
[4] = lp_build_mul(&bld
->bld_base
.base
, coords
[4], oow
);
2261 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2263 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2264 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2265 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 1, dim
);
2266 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 2, dim
);
2268 params
.derivs
= &derivs
;
2270 * could also check all src regs if constant but I doubt such
2271 * cases exist in practice.
2273 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2274 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2275 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2278 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2282 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2285 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2287 /* we don't handle the 4 offset version of tg4 */
2288 if (inst
->Texture
.NumOffsets
== 1) {
2290 sample_key
|= LP_SAMPLER_OFFSETS
;
2291 for (dim
= 0; dim
< num_offsets
; dim
++) {
2292 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2296 params
.type
= bld
->bld_base
.base
.type
;
2297 params
.sample_key
= sample_key
;
2298 params
.texture_index
= unit
;
2299 params
.sampler_index
= unit
;
2300 params
.context_ptr
= bld
->context_ptr
;
2301 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2302 params
.coords
= coords
;
2303 params
.offsets
= offsets
;
2305 params
.texel
= texel
;
2307 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2308 bld
->bld_base
.base
.gallivm
,
2313 emit_sample(struct lp_build_tgsi_soa_context
*bld
,
2314 const struct tgsi_full_instruction
*inst
,
2315 enum lp_build_tex_modifier modifier
,
2317 enum lp_sampler_op_type sample_type
,
2318 LLVMValueRef
*texel
)
2320 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2321 unsigned texture_unit
, sampler_unit
;
2322 LLVMValueRef lod
= NULL
;
2323 LLVMValueRef coords
[5];
2324 LLVMValueRef offsets
[3] = { NULL
};
2325 struct lp_derivatives derivs
;
2326 struct lp_sampler_params params
;
2327 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2329 unsigned num_offsets
, num_derivs
, i
;
2330 unsigned layer_coord
= 0;
2331 unsigned sample_key
= sample_type
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2333 memset(¶ms
, 0, sizeof(params
));
2335 if (!bld
->sampler
) {
2336 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2337 for (i
= 0; i
< 4; i
++) {
2338 texel
[i
] = bld
->bld_base
.base
.undef
;
2344 * unlike old-style tex opcodes the texture/sampler indices
2345 * always come from src1 and src2 respectively.
2347 texture_unit
= inst
->Src
[1].Register
.Index
;
2348 sampler_unit
= inst
->Src
[2].Register
.Index
;
2351 * Note inst->Texture.Texture will contain the number of offsets,
2352 * however the target information is NOT there and comes from the
2353 * declared sampler views instead.
2355 switch (bld
->sv
[texture_unit
].Resource
) {
2356 case TGSI_TEXTURE_1D
:
2360 case TGSI_TEXTURE_1D_ARRAY
:
2365 case TGSI_TEXTURE_2D
:
2366 case TGSI_TEXTURE_RECT
:
2370 case TGSI_TEXTURE_2D_ARRAY
:
2375 case TGSI_TEXTURE_CUBE
:
2379 case TGSI_TEXTURE_3D
:
2383 case TGSI_TEXTURE_CUBE_ARRAY
:
2393 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
||
2394 modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2395 lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2396 if (modifier
== LP_BLD_TEX_MODIFIER_LOD_BIAS
) {
2397 sample_key
|= LP_SAMPLER_LOD_BIAS
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2399 else if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
) {
2400 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2402 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2404 else if (modifier
== LP_BLD_TEX_MODIFIER_LOD_ZERO
) {
2405 /* XXX might be better to explicitly pass the level zero information */
2406 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2407 lod
= lp_build_const_vec(gallivm
, bld
->bld_base
.base
.type
, 0.0F
);
2410 for (i
= 0; i
< num_derivs
; i
++) {
2411 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2413 for (i
= num_derivs
; i
< 5; i
++) {
2414 coords
[i
] = bld
->bld_base
.base
.undef
;
2417 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2419 if (layer_coord
== 3)
2420 coords
[3] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2422 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2424 /* Shadow coord occupies always 5th slot. */
2426 sample_key
|= LP_SAMPLER_SHADOW
;
2427 coords
[4] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, 0);
2430 if (modifier
== LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
) {
2432 sample_key
|= LP_SAMPLER_LOD_DERIVATIVES
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2433 for (dim
= 0; dim
< num_derivs
; ++dim
) {
2434 derivs
.ddx
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 3, dim
);
2435 derivs
.ddy
[dim
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 4, dim
);
2437 params
.derivs
= &derivs
;
2439 * could also check all src regs if constant but I doubt such
2440 * cases exist in practice.
2442 if (bld
->bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
) {
2443 if (gallivm_perf
& GALLIVM_PERF_NO_QUAD_LOD
) {
2444 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2447 lod_property
= LP_SAMPLER_LOD_PER_QUAD
;
2451 lod_property
= LP_SAMPLER_LOD_PER_ELEMENT
;
2455 /* some advanced gather instructions (txgo) would require 4 offsets */
2456 if (inst
->Texture
.NumOffsets
== 1) {
2458 sample_key
|= LP_SAMPLER_OFFSETS
;
2459 for (dim
= 0; dim
< num_offsets
; dim
++) {
2460 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2463 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2465 params
.type
= bld
->bld_base
.base
.type
;
2466 params
.sample_key
= sample_key
;
2467 params
.texture_index
= texture_unit
;
2468 params
.sampler_index
= sampler_unit
;
2469 params
.context_ptr
= bld
->context_ptr
;
2470 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2471 params
.coords
= coords
;
2472 params
.offsets
= offsets
;
2474 params
.texel
= texel
;
2476 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2477 bld
->bld_base
.base
.gallivm
,
2480 if (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2481 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2482 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2483 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
) {
2484 unsigned char swizzles
[4];
2485 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2486 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2487 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2488 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2490 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2495 emit_fetch_texels( struct lp_build_tgsi_soa_context
*bld
,
2496 const struct tgsi_full_instruction
*inst
,
2497 LLVMValueRef
*texel
,
2500 unsigned unit
, target
;
2501 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
2502 LLVMValueRef explicit_lod
= NULL
;
2503 LLVMValueRef coords
[5];
2504 LLVMValueRef offsets
[3] = { NULL
};
2505 struct lp_sampler_params params
;
2506 enum lp_sampler_lod_property lod_property
= LP_SAMPLER_LOD_SCALAR
;
2508 unsigned layer_coord
= 0;
2509 unsigned sample_key
= LP_SAMPLER_OP_FETCH
<< LP_SAMPLER_OP_TYPE_SHIFT
;
2511 memset(¶ms
, 0, sizeof(params
));
2513 if (!bld
->sampler
) {
2514 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2515 for (i
= 0; i
< 4; i
++) {
2516 texel
[i
] = coord_undef
;
2521 unit
= inst
->Src
[1].Register
.Index
;
2524 target
= bld
->sv
[unit
].Resource
;
2527 target
= inst
->Texture
.Texture
;
2531 case TGSI_TEXTURE_1D
:
2532 case TGSI_TEXTURE_BUFFER
:
2535 case TGSI_TEXTURE_1D_ARRAY
:
2539 case TGSI_TEXTURE_2D
:
2540 case TGSI_TEXTURE_RECT
:
2541 case TGSI_TEXTURE_2D_MSAA
:
2544 case TGSI_TEXTURE_2D_ARRAY
:
2545 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
2549 case TGSI_TEXTURE_3D
:
2557 /* always have lod except for buffers and msaa targets ? */
2558 if (target
!= TGSI_TEXTURE_BUFFER
&&
2559 target
!= TGSI_TEXTURE_2D_MSAA
&&
2560 target
!= TGSI_TEXTURE_2D_ARRAY_MSAA
) {
2561 sample_key
|= LP_SAMPLER_LOD_EXPLICIT
<< LP_SAMPLER_LOD_CONTROL_SHIFT
;
2562 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 3);
2563 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2566 * XXX: for real msaa support, the w component (or src2.x for sample_i_ms)
2567 * would be the sample index.
2570 for (i
= 0; i
< dims
; i
++) {
2571 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, i
);
2573 /* never use more than 3 coords here but emit_fetch_texel copies all 5 anyway */
2574 for (i
= dims
; i
< 5; i
++) {
2575 coords
[i
] = coord_undef
;
2578 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, layer_coord
);
2580 if (inst
->Texture
.NumOffsets
== 1) {
2582 sample_key
|= LP_SAMPLER_OFFSETS
;
2583 for (dim
= 0; dim
< dims
; dim
++) {
2584 offsets
[dim
] = lp_build_emit_fetch_texoffset(&bld
->bld_base
, inst
, 0, dim
);
2587 sample_key
|= lod_property
<< LP_SAMPLER_LOD_PROPERTY_SHIFT
;
2589 params
.type
= bld
->bld_base
.base
.type
;
2590 params
.sample_key
= sample_key
;
2591 params
.texture_index
= unit
;
2593 * sampler not actually used, set to 0 so it won't exceed PIPE_MAX_SAMPLERS
2594 * and trigger some assertions with d3d10 where the sampler view number
2597 params
.sampler_index
= 0;
2598 params
.context_ptr
= bld
->context_ptr
;
2599 params
.thread_data_ptr
= bld
->thread_data_ptr
;
2600 params
.coords
= coords
;
2601 params
.offsets
= offsets
;
2602 params
.derivs
= NULL
;
2603 params
.lod
= explicit_lod
;
2604 params
.texel
= texel
;
2606 bld
->sampler
->emit_tex_sample(bld
->sampler
,
2607 bld
->bld_base
.base
.gallivm
,
2611 (inst
->Src
[1].Register
.SwizzleX
!= PIPE_SWIZZLE_X
||
2612 inst
->Src
[1].Register
.SwizzleY
!= PIPE_SWIZZLE_Y
||
2613 inst
->Src
[1].Register
.SwizzleZ
!= PIPE_SWIZZLE_Z
||
2614 inst
->Src
[1].Register
.SwizzleW
!= PIPE_SWIZZLE_W
)) {
2615 unsigned char swizzles
[4];
2616 swizzles
[0] = inst
->Src
[1].Register
.SwizzleX
;
2617 swizzles
[1] = inst
->Src
[1].Register
.SwizzleY
;
2618 swizzles
[2] = inst
->Src
[1].Register
.SwizzleZ
;
2619 swizzles
[3] = inst
->Src
[1].Register
.SwizzleW
;
2621 lp_build_swizzle_soa_inplace(&bld
->bld_base
.base
, texel
, swizzles
);
2626 emit_size_query( struct lp_build_tgsi_soa_context
*bld
,
2627 const struct tgsi_full_instruction
*inst
,
2628 LLVMValueRef
*sizes_out
,
2629 boolean is_sviewinfo
)
2631 LLVMValueRef explicit_lod
;
2632 enum lp_sampler_lod_property lod_property
;
2635 unsigned unit
= inst
->Src
[1].Register
.Index
;
2636 unsigned target
, pipe_target
;
2637 struct lp_sampler_size_query_params params
;
2640 target
= bld
->sv
[unit
].Resource
;
2643 target
= inst
->Texture
.Texture
;
2646 case TGSI_TEXTURE_BUFFER
:
2647 case TGSI_TEXTURE_RECT
:
2648 case TGSI_TEXTURE_SHADOWRECT
:
2656 if (!bld
->sampler
) {
2657 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
2658 for (i
= 0; i
< 4; i
++)
2659 sizes_out
[i
] = bld
->bld_base
.int_bld
.undef
;
2664 explicit_lod
= lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, 0);
2665 lod_property
= lp_build_lod_property(&bld
->bld_base
, inst
, 0);
2668 explicit_lod
= NULL
;
2669 lod_property
= LP_SAMPLER_LOD_SCALAR
;
2673 pipe_target
= tgsi_to_pipe_tex_target(target
);
2675 params
.int_type
= bld
->bld_base
.int_bld
.type
;
2676 params
.texture_unit
= unit
;
2677 params
.target
= pipe_target
;
2678 params
.context_ptr
= bld
->context_ptr
;
2679 params
.is_sviewinfo
= TRUE
;
2680 params
.lod_property
= lod_property
;
2681 params
.explicit_lod
= explicit_lod
;
2682 params
.sizes_out
= sizes_out
;
2684 bld
->sampler
->emit_size_query(bld
->sampler
,
2685 bld
->bld_base
.base
.gallivm
,
2690 near_end_of_shader(struct lp_build_tgsi_soa_context
*bld
,
2695 for (i
= 0; i
< 5; i
++) {
2696 enum tgsi_opcode opcode
;
2698 if (pc
+ i
>= bld
->bld_base
.info
->num_instructions
)
2701 opcode
= bld
->bld_base
.instructions
[pc
+ i
].Instruction
.Opcode
;
2703 if (opcode
== TGSI_OPCODE_END
)
2706 if (opcode
== TGSI_OPCODE_TEX
||
2707 opcode
== TGSI_OPCODE_TXP
||
2708 opcode
== TGSI_OPCODE_TXD
||
2709 opcode
== TGSI_OPCODE_TXB
||
2710 opcode
== TGSI_OPCODE_TXL
||
2711 opcode
== TGSI_OPCODE_TXF
||
2712 opcode
== TGSI_OPCODE_TXQ
||
2713 opcode
== TGSI_OPCODE_TEX2
||
2714 opcode
== TGSI_OPCODE_TXB2
||
2715 opcode
== TGSI_OPCODE_TXL2
||
2716 opcode
== TGSI_OPCODE_SAMPLE
||
2717 opcode
== TGSI_OPCODE_SAMPLE_B
||
2718 opcode
== TGSI_OPCODE_SAMPLE_C
||
2719 opcode
== TGSI_OPCODE_SAMPLE_C_LZ
||
2720 opcode
== TGSI_OPCODE_SAMPLE_D
||
2721 opcode
== TGSI_OPCODE_SAMPLE_I
||
2722 opcode
== TGSI_OPCODE_SAMPLE_I_MS
||
2723 opcode
== TGSI_OPCODE_SAMPLE_L
||
2724 opcode
== TGSI_OPCODE_SVIEWINFO
||
2725 opcode
== TGSI_OPCODE_CAL
||
2726 opcode
== TGSI_OPCODE_IF
||
2727 opcode
== TGSI_OPCODE_UIF
||
2728 opcode
== TGSI_OPCODE_BGNLOOP
||
2729 opcode
== TGSI_OPCODE_SWITCH
)
2739 * Kill fragment if any of the src register values are negative.
2743 struct lp_build_tgsi_soa_context
*bld
,
2744 const struct tgsi_full_instruction
*inst
,
2747 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2748 const struct tgsi_full_src_register
*reg
= &inst
->Src
[0];
2749 LLVMValueRef terms
[TGSI_NUM_CHANNELS
];
2751 unsigned chan_index
;
2753 memset(&terms
, 0, sizeof terms
);
2755 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2758 /* Unswizzle channel */
2759 swizzle
= tgsi_util_get_full_src_register_swizzle( reg
, chan_index
);
2761 /* Check if the component has not been already tested. */
2762 assert(swizzle
< TGSI_NUM_CHANNELS
);
2763 if( !terms
[swizzle
] )
2764 /* TODO: change the comparison operator instead of setting the sign */
2765 terms
[swizzle
] = lp_build_emit_fetch(&bld
->bld_base
, inst
, 0, chan_index
);
2769 TGSI_FOR_EACH_CHANNEL( chan_index
) {
2770 if(terms
[chan_index
]) {
2771 LLVMValueRef chan_mask
;
2774 * If term < 0 then mask = 0 else mask = ~0.
2776 chan_mask
= lp_build_cmp(&bld
->bld_base
.base
, PIPE_FUNC_GEQUAL
, terms
[chan_index
], bld
->bld_base
.base
.zero
);
2779 mask
= LLVMBuildAnd(builder
, mask
, chan_mask
, "");
2785 if (bld
->exec_mask
.has_mask
) {
2786 LLVMValueRef invmask
;
2787 invmask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2788 mask
= LLVMBuildOr(builder
, mask
, invmask
, "");
2791 lp_build_mask_update(bld
->mask
, mask
);
2792 if (!near_end_of_shader(bld
, pc
))
2793 lp_build_mask_check(bld
->mask
);
2798 * Unconditional fragment kill.
2799 * The only predication is the execution mask which will apply if
2800 * we're inside a loop or conditional.
2803 emit_kill(struct lp_build_tgsi_soa_context
*bld
,
2806 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
2809 /* For those channels which are "alive", disable fragment shader
2812 if (bld
->exec_mask
.has_mask
) {
2813 mask
= LLVMBuildNot(builder
, bld
->exec_mask
.exec_mask
, "kilp");
2816 LLVMValueRef zero
= LLVMConstNull(bld
->bld_base
.base
.int_vec_type
);
2820 lp_build_mask_update(bld
->mask
, mask
);
2822 if (!near_end_of_shader(bld
, pc
))
2823 lp_build_mask_check(bld
->mask
);
2828 * Emit code which will dump the value of all the temporary registers
2832 emit_dump_file(struct lp_build_tgsi_soa_context
*bld
,
2835 const struct tgsi_shader_info
*info
= bld
->bld_base
.info
;
2836 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2837 LLVMBuilderRef builder
= gallivm
->builder
;
2838 LLVMValueRef reg_ptr
;
2840 int max_index
= info
->file_max
[file
];
2843 * Some register files, particularly constants, can be very large,
2844 * and dumping everything could make this unusably slow.
2846 max_index
= MIN2(max_index
, 32);
2848 for (index
= 0; index
<= max_index
; index
++) {
2853 if (index
< 8 * sizeof(unsigned) &&
2854 (info
->file_mask
[file
] & (1u << index
)) == 0) {
2855 /* This was not declared.*/
2859 if (file
== TGSI_FILE_INPUT
) {
2860 mask
= info
->input_usage_mask
[index
];
2862 mask
= TGSI_WRITEMASK_XYZW
;
2865 for (chan
= 0; chan
< 4; chan
++) {
2866 if ((mask
& (1 << chan
)) == 0) {
2867 /* This channel is not used.*/
2871 if (file
== TGSI_FILE_CONSTANT
) {
2872 struct tgsi_full_src_register reg
;
2873 memset(®
, 0, sizeof reg
);
2874 reg
.Register
.File
= file
;
2875 reg
.Register
.Index
= index
;
2876 reg
.Register
.SwizzleX
= 0;
2877 reg
.Register
.SwizzleY
= 1;
2878 reg
.Register
.SwizzleZ
= 2;
2879 reg
.Register
.SwizzleW
= 3;
2881 res
= bld
->bld_base
.emit_fetch_funcs
[file
](&bld
->bld_base
, ®
, TGSI_TYPE_FLOAT
, chan
);
2885 } else if (file
== TGSI_FILE_INPUT
) {
2886 res
= bld
->inputs
[index
][chan
];
2890 } else if (file
== TGSI_FILE_TEMPORARY
) {
2891 reg_ptr
= lp_get_temp_ptr_soa(bld
, index
, chan
);
2893 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2894 } else if (file
== TGSI_FILE_OUTPUT
) {
2895 reg_ptr
= lp_get_output_ptr(bld
, index
, chan
);
2897 res
= LLVMBuildLoad(builder
, reg_ptr
, "");
2903 emit_dump_reg(gallivm
, file
, index
, chan
, res
);
2911 lp_emit_declaration_soa(
2912 struct lp_build_tgsi_context
*bld_base
,
2913 const struct tgsi_full_declaration
*decl
)
2915 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
2916 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
2917 LLVMTypeRef vec_type
= bld
->bld_base
.base
.vec_type
;
2918 const unsigned first
= decl
->Range
.First
;
2919 const unsigned last
= decl
->Range
.Last
;
2922 assert(last
<= bld
->bld_base
.info
->file_max
[decl
->Declaration
.File
]);
2924 switch (decl
->Declaration
.File
) {
2925 case TGSI_FILE_TEMPORARY
:
2926 if (!(bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
))) {
2927 assert(last
< LP_MAX_INLINED_TEMPS
);
2928 for (idx
= first
; idx
<= last
; ++idx
) {
2929 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2930 bld
->temps
[idx
][i
] = lp_build_alloca(gallivm
, vec_type
, "temp");
2935 case TGSI_FILE_OUTPUT
:
2936 if (!(bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
))) {
2937 for (idx
= first
; idx
<= last
; ++idx
) {
2938 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2939 bld
->outputs
[idx
][i
] = lp_build_alloca(gallivm
,
2940 vec_type
, "output");
2945 case TGSI_FILE_ADDRESS
:
2946 /* ADDR registers are only allocated with an integer LLVM IR type,
2947 * as they are guaranteed to always have integers.
2948 * XXX: Not sure if this exception is worthwhile (or the whole idea of
2949 * an ADDR register for that matter).
2951 assert(last
< LP_MAX_TGSI_ADDRS
);
2952 for (idx
= first
; idx
<= last
; ++idx
) {
2953 assert(idx
< LP_MAX_TGSI_ADDRS
);
2954 for (i
= 0; i
< TGSI_NUM_CHANNELS
; i
++)
2955 bld
->addr
[idx
][i
] = lp_build_alloca(gallivm
, bld_base
->base
.int_vec_type
, "addr");
2959 case TGSI_FILE_SAMPLER_VIEW
:
2961 * The target stored here MUST match whatever there actually
2962 * is in the set sampler views (what about return type?).
2964 assert(last
< PIPE_MAX_SHADER_SAMPLER_VIEWS
);
2965 for (idx
= first
; idx
<= last
; ++idx
) {
2966 bld
->sv
[idx
] = decl
->SamplerView
;
2970 case TGSI_FILE_CONSTANT
:
2973 * We could trivially fetch the per-buffer pointer when fetching the
2974 * constant, relying on llvm to figure out it's always the same pointer
2975 * anyway. However, doing so results in a huge (more than factor of 10)
2976 * slowdown in llvm compilation times for some (but not all) shaders
2977 * (more specifically, the IR optimization spends way more time in
2978 * DominatorTree::dominates). At least with llvm versions 3.1, 3.3.
2980 unsigned idx2D
= decl
->Dim
.Index2D
;
2981 LLVMValueRef index2D
= lp_build_const_int32(gallivm
, idx2D
);
2982 assert(idx2D
< LP_MAX_TGSI_CONST_BUFFERS
);
2983 bld
->consts
[idx2D
] =
2984 lp_build_array_get(gallivm
, bld
->consts_ptr
, index2D
);
2985 bld
->consts_sizes
[idx2D
] =
2986 lp_build_array_get(gallivm
, bld
->const_sizes_ptr
, index2D
);
2989 case TGSI_FILE_BUFFER
:
2991 unsigned idx
= decl
->Range
.First
;
2992 LLVMValueRef index
= lp_build_const_int32(gallivm
, idx
);
2993 assert(idx
< LP_MAX_TGSI_SHADER_BUFFERS
);
2995 lp_build_array_get(gallivm
, bld
->ssbo_ptr
, index
);
2996 bld
->ssbo_sizes
[idx
] =
2997 lp_build_array_get(gallivm
, bld
->ssbo_sizes_ptr
, index
);
3002 /* don't need to declare other vars */
3008 void lp_emit_immediate_soa(
3009 struct lp_build_tgsi_context
*bld_base
,
3010 const struct tgsi_full_immediate
*imm
)
3012 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3013 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3014 LLVMValueRef imms
[4];
3016 const uint size
= imm
->Immediate
.NrTokens
- 1;
3018 switch (imm
->Immediate
.DataType
) {
3019 case TGSI_IMM_FLOAT32
:
3020 for( i
= 0; i
< size
; ++i
)
3022 lp_build_const_vec(gallivm
, bld_base
->base
.type
, imm
->u
[i
].Float
);
3025 case TGSI_IMM_FLOAT64
:
3026 case TGSI_IMM_UINT64
:
3027 case TGSI_IMM_INT64
:
3028 case TGSI_IMM_UINT32
:
3029 for( i
= 0; i
< size
; ++i
) {
3030 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->uint_bld
.type
, imm
->u
[i
].Uint
);
3031 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
3035 case TGSI_IMM_INT32
:
3036 for( i
= 0; i
< size
; ++i
) {
3037 LLVMValueRef tmp
= lp_build_const_vec(gallivm
, bld_base
->int_bld
.type
, imm
->u
[i
].Int
);
3038 imms
[i
] = LLVMConstBitCast(tmp
, bld_base
->base
.vec_type
);
3043 for( i
= size
; i
< 4; ++i
)
3044 imms
[i
] = bld_base
->base
.undef
;
3046 if (bld
->use_immediates_array
) {
3047 unsigned index
= bld
->num_immediates
;
3048 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
3049 LLVMBuilderRef builder
= gallivm
->builder
;
3050 LLVMValueRef gep
[2];
3051 gep
[0] = lp_build_const_int32(gallivm
, 0);
3053 assert(bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
));
3054 for (i
= 0; i
< 4; ++i
) {
3055 gep
[1] = lp_build_const_int32(gallivm
, index
* 4 + i
);
3056 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
3057 bld
->imms_array
, gep
, 2, "");
3058 LLVMBuildStore(builder
, imms
[i
], imm_ptr
);
3061 /* simply copy the immediate values into the next immediates[] slot */
3063 assert(imm
->Immediate
.NrTokens
- 1 <= 4);
3064 assert(bld
->num_immediates
< LP_MAX_INLINED_IMMEDIATES
);
3066 for(i
= 0; i
< 4; ++i
)
3067 bld
->immediates
[bld
->num_immediates
][i
] = imms
[i
];
3069 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
3070 unsigned index
= bld
->num_immediates
;
3071 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
3072 LLVMBuilderRef builder
= gallivm
->builder
;
3073 LLVMValueRef gep
[2];
3074 gep
[0] = lp_build_const_int32(gallivm
, 0);
3075 for (i
= 0; i
< 4; ++i
) {
3076 gep
[1] = lp_build_const_int32(gallivm
, index
* 4 + i
);
3077 LLVMValueRef imm_ptr
= LLVMBuildGEP(builder
,
3078 bld
->imms_array
, gep
, 2, "");
3079 LLVMBuildStore(builder
,
3080 bld
->immediates
[index
][i
],
3086 bld
->num_immediates
++;
3091 const struct lp_build_tgsi_action
* action
,
3092 struct lp_build_tgsi_context
* bld_base
,
3093 struct lp_build_emit_data
* emit_data
)
3095 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3097 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
,
3098 &emit_data
->output
[emit_data
->chan
], NULL
);
3103 const struct lp_build_tgsi_action
* action
,
3104 struct lp_build_tgsi_context
* bld_base
,
3105 struct lp_build_emit_data
* emit_data
)
3107 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3109 emit_fetch_deriv(bld
, emit_data
->args
[0], NULL
, NULL
,
3110 &emit_data
->output
[emit_data
->chan
]);
3115 const struct lp_build_tgsi_action
* action
,
3116 struct lp_build_tgsi_context
* bld_base
,
3117 struct lp_build_emit_data
* emit_data
)
3119 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3121 emit_kill(bld
, bld_base
->pc
- 1);
3126 const struct lp_build_tgsi_action
* action
,
3127 struct lp_build_tgsi_context
* bld_base
,
3128 struct lp_build_emit_data
* emit_data
)
3130 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3132 emit_kill_if(bld
, emit_data
->inst
, bld_base
->pc
- 1);
3137 const struct lp_build_tgsi_action
* action
,
3138 struct lp_build_tgsi_context
* bld_base
,
3139 struct lp_build_emit_data
* emit_data
)
3141 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3143 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3144 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3149 const struct lp_build_tgsi_action
* action
,
3150 struct lp_build_tgsi_context
* bld_base
,
3151 struct lp_build_emit_data
* emit_data
)
3153 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3155 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3156 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3161 const struct lp_build_tgsi_action
* action
,
3162 struct lp_build_tgsi_context
* bld_base
,
3163 struct lp_build_emit_data
* emit_data
)
3165 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3167 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3168 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3173 const struct lp_build_tgsi_action
* action
,
3174 struct lp_build_tgsi_context
* bld_base
,
3175 struct lp_build_emit_data
* emit_data
)
3177 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3179 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3180 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3185 const struct lp_build_tgsi_action
* action
,
3186 struct lp_build_tgsi_context
* bld_base
,
3187 struct lp_build_emit_data
* emit_data
)
3189 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3191 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3192 emit_data
->output
, 3, LP_SAMPLER_OP_TEXTURE
);
3197 const struct lp_build_tgsi_action
* action
,
3198 struct lp_build_tgsi_context
* bld_base
,
3199 struct lp_build_emit_data
* emit_data
)
3201 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3203 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3204 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3209 const struct lp_build_tgsi_action
* action
,
3210 struct lp_build_tgsi_context
* bld_base
,
3211 struct lp_build_emit_data
* emit_data
)
3213 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3215 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3216 emit_data
->output
, 2, LP_SAMPLER_OP_TEXTURE
);
3221 const struct lp_build_tgsi_action
* action
,
3222 struct lp_build_tgsi_context
* bld_base
,
3223 struct lp_build_emit_data
* emit_data
)
3225 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3227 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_PROJECTED
,
3228 emit_data
->output
, 1, LP_SAMPLER_OP_TEXTURE
);
3233 const struct lp_build_tgsi_action
* action
,
3234 struct lp_build_tgsi_context
* bld_base
,
3235 struct lp_build_emit_data
* emit_data
)
3237 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3239 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3240 emit_data
->output
, 2, LP_SAMPLER_OP_GATHER
);
3245 const struct lp_build_tgsi_action
* action
,
3246 struct lp_build_tgsi_context
* bld_base
,
3247 struct lp_build_emit_data
* emit_data
)
3249 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3251 emit_tex(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3252 emit_data
->output
, 1, LP_SAMPLER_OP_LODQ
);
3257 const struct lp_build_tgsi_action
* action
,
3258 struct lp_build_tgsi_context
* bld_base
,
3259 struct lp_build_emit_data
* emit_data
)
3261 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3263 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3268 const struct lp_build_tgsi_action
* action
,
3269 struct lp_build_tgsi_context
* bld_base
,
3270 struct lp_build_emit_data
* emit_data
)
3272 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3274 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, FALSE
);
3279 const struct lp_build_tgsi_action
* action
,
3280 struct lp_build_tgsi_context
* bld_base
,
3281 struct lp_build_emit_data
* emit_data
)
3283 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3285 emit_fetch_texels(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3290 const struct lp_build_tgsi_action
* action
,
3291 struct lp_build_tgsi_context
* bld_base
,
3292 struct lp_build_emit_data
* emit_data
)
3294 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3296 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3297 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3302 const struct lp_build_tgsi_action
* action
,
3303 struct lp_build_tgsi_context
* bld_base
,
3304 struct lp_build_emit_data
* emit_data
)
3306 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3308 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_BIAS
,
3309 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3314 const struct lp_build_tgsi_action
* action
,
3315 struct lp_build_tgsi_context
* bld_base
,
3316 struct lp_build_emit_data
* emit_data
)
3318 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3320 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3321 TRUE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3326 const struct lp_build_tgsi_action
* action
,
3327 struct lp_build_tgsi_context
* bld_base
,
3328 struct lp_build_emit_data
* emit_data
)
3330 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3332 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_LOD_ZERO
,
3333 TRUE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3338 const struct lp_build_tgsi_action
* action
,
3339 struct lp_build_tgsi_context
* bld_base
,
3340 struct lp_build_emit_data
* emit_data
)
3342 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3344 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV
,
3345 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3350 const struct lp_build_tgsi_action
* action
,
3351 struct lp_build_tgsi_context
* bld_base
,
3352 struct lp_build_emit_data
* emit_data
)
3354 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3356 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD
,
3357 FALSE
, LP_SAMPLER_OP_TEXTURE
, emit_data
->output
);
3362 const struct lp_build_tgsi_action
* action
,
3363 struct lp_build_tgsi_context
* bld_base
,
3364 struct lp_build_emit_data
* emit_data
)
3366 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3368 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3369 FALSE
, LP_SAMPLER_OP_GATHER
, emit_data
->output
);
3374 const struct lp_build_tgsi_action
* action
,
3375 struct lp_build_tgsi_context
* bld_base
,
3376 struct lp_build_emit_data
* emit_data
)
3378 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3380 emit_size_query(bld
, emit_data
->inst
, emit_data
->output
, TRUE
);
3385 const struct lp_build_tgsi_action
* action
,
3386 struct lp_build_tgsi_context
* bld_base
,
3387 struct lp_build_emit_data
* emit_data
)
3389 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3391 emit_sample(bld
, emit_data
->inst
, LP_BLD_TEX_MODIFIER_NONE
,
3392 FALSE
, LP_SAMPLER_OP_LODQ
, emit_data
->output
);
3395 static void target_to_dims_layer(unsigned target
,
3397 unsigned *layer_coord
)
3401 case TGSI_TEXTURE_1D
:
3402 case TGSI_TEXTURE_BUFFER
:
3405 case TGSI_TEXTURE_1D_ARRAY
:
3409 case TGSI_TEXTURE_2D
:
3410 case TGSI_TEXTURE_RECT
:
3413 case TGSI_TEXTURE_2D_ARRAY
:
3417 case TGSI_TEXTURE_3D
:
3418 case TGSI_TEXTURE_CUBE
:
3419 case TGSI_TEXTURE_CUBE_ARRAY
:
3430 const struct lp_build_tgsi_action
* action
,
3431 struct lp_build_tgsi_context
* bld_base
,
3432 struct lp_build_emit_data
* emit_data
)
3434 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3435 struct lp_img_params params
;
3436 LLVMValueRef coords
[5];
3437 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
3439 unsigned target
= emit_data
->inst
->Memory
.Texture
;
3440 unsigned layer_coord
;
3442 target_to_dims_layer(target
, &dims
, &layer_coord
);
3444 for (unsigned i
= 0; i
< dims
; i
++) {
3445 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, i
);
3447 for (unsigned i
= dims
; i
< 5; i
++) {
3448 coords
[i
] = coord_undef
;
3451 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, layer_coord
);
3453 memset(¶ms
, 0, sizeof(params
));
3455 params
.type
= bld
->bld_base
.base
.type
;
3456 params
.context_ptr
= bld
->context_ptr
;
3457 params
.thread_data_ptr
= bld
->thread_data_ptr
;
3458 params
.coords
= coords
;
3459 params
.outdata
= emit_data
->output
;
3460 params
.target
= tgsi_to_pipe_tex_target(target
);
3461 params
.image_index
= emit_data
->inst
->Src
[0].Register
.Index
;
3462 params
.img_op
= LP_IMG_LOAD
;
3463 bld
->image
->emit_op(bld
->image
,
3464 bld
->bld_base
.base
.gallivm
,
3470 const struct lp_build_tgsi_action
* action
,
3471 struct lp_build_tgsi_context
* bld_base
,
3472 struct lp_build_emit_data
* emit_data
)
3474 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3475 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3476 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3477 const struct tgsi_full_src_register
*bufreg
= &emit_data
->inst
->Src
[0];
3478 unsigned buf
= bufreg
->Register
.Index
;
3479 assert(bufreg
->Register
.File
== TGSI_FILE_BUFFER
|| bufreg
->Register
.File
== TGSI_FILE_IMAGE
);
3480 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3482 if (bufreg
->Register
.File
== TGSI_FILE_IMAGE
)
3483 img_load_emit(action
, bld_base
, emit_data
);
3485 /* for indirect support with ARB_gpu_shader5 */
3488 LLVMValueRef scalar
, scalar_ptr
;
3489 unsigned chan_index
;
3491 index
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, 0);
3492 index
= lp_build_shr_imm(uint_bld
, index
, 2);
3494 scalar_ptr
= bld
->ssbos
[buf
];
3496 LLVMValueRef ssbo_limit
;
3498 ssbo_limit
= LLVMBuildAShr(gallivm
->builder
, bld
->ssbo_sizes
[buf
], lp_build_const_int32(gallivm
, 2), "");
3499 ssbo_limit
= lp_build_broadcast_scalar(uint_bld
, ssbo_limit
);
3501 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(emit_data
->inst
, chan_index
) {
3502 LLVMValueRef loop_index
= lp_build_add(uint_bld
, index
, lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
));
3504 LLVMValueRef exec_mask
= mask_vec(bld_base
);
3505 LLVMValueRef ssbo_oob_cmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_LESS
, loop_index
, ssbo_limit
);
3506 exec_mask
= LLVMBuildAnd(builder
, exec_mask
, ssbo_oob_cmp
, "");
3508 LLVMValueRef result
= lp_build_alloca(gallivm
, uint_bld
->vec_type
, "");
3509 struct lp_build_loop_state loop_state
;
3510 lp_build_loop_begin(&loop_state
, gallivm
, lp_build_const_int32(gallivm
, 0));
3512 struct lp_build_if_state ifthen
;
3513 LLVMValueRef cond
, temp_res
;
3515 loop_index
= LLVMBuildExtractElement(gallivm
->builder
, loop_index
,
3516 loop_state
.counter
, "");
3518 cond
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
, exec_mask
, uint_bld
->zero
, "");
3519 cond
= LLVMBuildExtractElement(gallivm
->builder
, cond
, loop_state
.counter
, "");
3521 lp_build_if(&ifthen
, gallivm
, cond
);
3522 scalar
= lp_build_pointer_get(builder
, scalar_ptr
, loop_index
);
3524 temp_res
= LLVMBuildLoad(builder
, result
, "");
3525 temp_res
= LLVMBuildInsertElement(builder
, temp_res
, scalar
, loop_state
.counter
, "");
3526 LLVMBuildStore(builder
, temp_res
, result
);
3527 lp_build_else(&ifthen
);
3528 temp_res
= LLVMBuildLoad(builder
, result
, "");
3529 temp_res
= LLVMBuildInsertElement(builder
, temp_res
, lp_build_const_int32(gallivm
, 0), loop_state
.counter
, "");
3530 LLVMBuildStore(builder
, temp_res
, result
);
3531 lp_build_endif(&ifthen
);
3532 lp_build_loop_end_cond(&loop_state
, lp_build_const_int32(gallivm
, uint_bld
->type
.length
),
3534 emit_data
->output
[chan_index
] = LLVMBuildLoad(gallivm
->builder
, result
, "");
3541 const struct lp_build_tgsi_action
* action
,
3542 struct lp_build_tgsi_context
* bld_base
,
3543 struct lp_build_emit_data
* emit_data
)
3545 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3546 struct lp_img_params params
;
3547 LLVMValueRef coords
[5];
3548 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
3550 unsigned target
= emit_data
->inst
->Memory
.Texture
;
3551 unsigned layer_coord
;
3553 target_to_dims_layer(target
, &dims
, &layer_coord
);
3554 for (unsigned i
= 0; i
< dims
; i
++) {
3555 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, i
);
3557 for (unsigned i
= dims
; i
< 5; i
++) {
3558 coords
[i
] = coord_undef
;
3561 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, layer_coord
);
3562 memset(¶ms
, 0, sizeof(params
));
3564 params
.type
= bld
->bld_base
.base
.type
;
3565 params
.context_ptr
= bld
->context_ptr
;
3566 params
.thread_data_ptr
= bld
->thread_data_ptr
;
3567 params
.coords
= coords
;
3568 params
.outdata
= NULL
;
3569 params
.exec_mask
= mask_vec(bld_base
);
3570 params
.target
= tgsi_to_pipe_tex_target(target
);
3571 params
.image_index
= emit_data
->inst
->Dst
[0].Register
.Index
;
3572 params
.img_op
= LP_IMG_STORE
;
3573 for (unsigned i
= 0; i
< 4; i
++)
3574 params
.indata
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, i
);
3576 bld
->image
->emit_op(bld
->image
,
3577 bld
->bld_base
.base
.gallivm
,
3583 const struct lp_build_tgsi_action
* action
,
3584 struct lp_build_tgsi_context
* bld_base
,
3585 struct lp_build_emit_data
* emit_data
)
3587 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3588 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3589 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3590 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3591 const struct tgsi_full_dst_register
*bufreg
= &emit_data
->inst
->Dst
[0];
3592 unsigned buf
= bufreg
->Register
.Index
;
3593 assert(bufreg
->Register
.File
== TGSI_FILE_BUFFER
|| bufreg
->Register
.File
== TGSI_FILE_IMAGE
);
3595 if (bufreg
->Register
.File
== TGSI_FILE_IMAGE
) {
3596 img_store_emit(action
, bld_base
, emit_data
);
3600 LLVMValueRef index
; /* index into the const buffer */
3601 LLVMValueRef scalar_ptr
;
3603 unsigned chan_index
;
3605 index
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 0, 0);
3606 index
= lp_build_shr_imm(uint_bld
, index
, 2);
3608 scalar_ptr
= bld
->ssbos
[buf
];
3610 LLVMValueRef ssbo_limit
;
3612 ssbo_limit
= LLVMBuildAShr(gallivm
->builder
, bld
->ssbo_sizes
[buf
], lp_build_const_int32(gallivm
, 2), "");
3613 ssbo_limit
= lp_build_broadcast_scalar(uint_bld
, ssbo_limit
);
3615 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(emit_data
->inst
, chan_index
) {
3616 LLVMValueRef loop_index
= lp_build_add(uint_bld
, index
, lp_build_const_int_vec(gallivm
, uint_bld
->type
, chan_index
));
3618 value
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, chan_index
);
3620 LLVMValueRef exec_mask
= mask_vec(bld_base
);
3621 LLVMValueRef ssbo_oob_cmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_LESS
, loop_index
, ssbo_limit
);
3622 exec_mask
= LLVMBuildAnd(builder
, exec_mask
, ssbo_oob_cmp
, "");
3624 struct lp_build_loop_state loop_state
;
3625 lp_build_loop_begin(&loop_state
, gallivm
, lp_build_const_int32(gallivm
, 0));
3627 LLVMValueRef value_ptr
= LLVMBuildExtractElement(gallivm
->builder
, value
,
3628 loop_state
.counter
, "");
3629 value_ptr
= LLVMBuildBitCast(gallivm
->builder
, value_ptr
, uint_bld
->elem_type
, "");
3631 struct lp_build_if_state ifthen
;
3634 loop_index
= LLVMBuildExtractElement(gallivm
->builder
, loop_index
,
3635 loop_state
.counter
, "");
3637 cond
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
, exec_mask
, uint_bld
->zero
, "");
3638 cond
= LLVMBuildExtractElement(gallivm
->builder
, cond
, loop_state
.counter
, "");
3639 lp_build_if(&ifthen
, gallivm
, cond
);
3641 lp_build_pointer_set(builder
, scalar_ptr
, loop_index
, value_ptr
);
3643 lp_build_endif(&ifthen
);
3644 lp_build_loop_end_cond(&loop_state
, lp_build_const_int32(gallivm
, uint_bld
->type
.length
),
3652 const struct lp_build_tgsi_action
* action
,
3653 struct lp_build_tgsi_context
* bld_base
,
3654 struct lp_build_emit_data
* emit_data
)
3656 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3657 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3658 const struct tgsi_full_src_register
*bufreg
= &emit_data
->inst
->Src
[0];
3660 unsigned buf
= bufreg
->Register
.Index
;
3661 assert(bufreg
->Register
.File
== TGSI_FILE_BUFFER
|| bufreg
->Register
.File
== TGSI_FILE_IMAGE
);
3663 if (bufreg
->Register
.File
== TGSI_FILE_IMAGE
) {
3664 unsigned target
= emit_data
->inst
->Memory
.Texture
;
3665 struct lp_sampler_size_query_params params
= { 0 };
3666 params
.int_type
= bld
->bld_base
.int_bld
.type
;
3667 params
.texture_unit
= buf
;
3668 params
.target
= tgsi_to_pipe_tex_target(target
);
3669 params
.context_ptr
= bld
->context_ptr
;
3670 params
.sizes_out
= emit_data
->output
;
3672 bld
->image
->emit_size_query(bld
->image
,
3673 bld
->bld_base
.base
.gallivm
,
3676 LLVMValueRef num_ssbo
= bld
->ssbo_sizes
[buf
];
3678 emit_data
->output
[emit_data
->chan
] = lp_build_broadcast_scalar(uint_bld
, num_ssbo
);
3684 const struct lp_build_tgsi_action
* action
,
3685 struct lp_build_tgsi_context
* bld_base
,
3686 struct lp_build_emit_data
* emit_data
,
3687 LLVMAtomicRMWBinOp op
)
3689 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3690 struct lp_img_params params
;
3691 LLVMValueRef coords
[5];
3692 LLVMValueRef coord_undef
= LLVMGetUndef(bld
->bld_base
.base
.int_vec_type
);
3694 unsigned layer_coord
;
3695 unsigned target
= emit_data
->inst
->Memory
.Texture
;
3697 target_to_dims_layer(target
, &dims
, &layer_coord
);
3699 for (unsigned i
= 0; i
< dims
; i
++) {
3700 coords
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, i
);
3702 for (unsigned i
= dims
; i
< 5; i
++) {
3703 coords
[i
] = coord_undef
;
3706 coords
[2] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, layer_coord
);
3707 memset(¶ms
, 0, sizeof(params
));
3709 params
.type
= bld
->bld_base
.base
.type
;
3710 params
.context_ptr
= bld
->context_ptr
;
3711 params
.thread_data_ptr
= bld
->thread_data_ptr
;
3712 params
.exec_mask
= mask_vec(bld_base
);
3713 params
.image_index
= emit_data
->inst
->Src
[0].Register
.Index
;
3714 params
.coords
= coords
;
3715 params
.target
= tgsi_to_pipe_tex_target(target
);
3717 params
.outdata
= emit_data
->output
;
3718 params
.img_op
= (emit_data
->inst
->Instruction
.Opcode
== TGSI_OPCODE_ATOMCAS
) ? LP_IMG_ATOMIC_CAS
: LP_IMG_ATOMIC
;
3720 for (unsigned i
= 0; i
< 4; i
++)
3721 params
.indata
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 2, i
);
3722 if (emit_data
->inst
->Instruction
.Opcode
== TGSI_OPCODE_ATOMCAS
) {
3723 for (unsigned i
= 0; i
< 4; i
++)
3724 params
.indata2
[i
] = lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 3, i
);
3726 bld
->image
->emit_op(bld
->image
,
3727 bld
->bld_base
.base
.gallivm
,
3733 const struct lp_build_tgsi_action
* action
,
3734 struct lp_build_tgsi_context
* bld_base
,
3735 struct lp_build_emit_data
* emit_data
)
3737 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
3738 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
3739 LLVMBuilderRef builder
= gallivm
->builder
;
3740 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3741 const struct tgsi_full_src_register
*bufreg
= &emit_data
->inst
->Src
[0];
3743 assert(bufreg
->Register
.File
== TGSI_FILE_BUFFER
|| bufreg
->Register
.File
== TGSI_FILE_IMAGE
);
3744 unsigned buf
= bufreg
->Register
.Index
;
3746 LLVMAtomicRMWBinOp op
;
3747 switch (emit_data
->inst
->Instruction
.Opcode
) {
3748 case TGSI_OPCODE_ATOMUADD
:
3749 op
= LLVMAtomicRMWBinOpAdd
;
3751 case TGSI_OPCODE_ATOMXCHG
:
3752 op
= LLVMAtomicRMWBinOpXchg
;
3754 case TGSI_OPCODE_ATOMAND
:
3755 op
= LLVMAtomicRMWBinOpAnd
;
3757 case TGSI_OPCODE_ATOMOR
:
3758 op
= LLVMAtomicRMWBinOpOr
;
3760 case TGSI_OPCODE_ATOMXOR
:
3761 op
= LLVMAtomicRMWBinOpXor
;
3763 case TGSI_OPCODE_ATOMUMIN
:
3764 op
= LLVMAtomicRMWBinOpUMin
;
3766 case TGSI_OPCODE_ATOMUMAX
:
3767 op
= LLVMAtomicRMWBinOpUMax
;
3769 case TGSI_OPCODE_ATOMIMIN
:
3770 op
= LLVMAtomicRMWBinOpMin
;
3772 case TGSI_OPCODE_ATOMIMAX
:
3773 op
= LLVMAtomicRMWBinOpMax
;
3775 case TGSI_OPCODE_ATOMCAS
:
3782 if (bufreg
->Register
.File
== TGSI_FILE_IMAGE
) {
3783 img_atomic_emit(action
, bld_base
, emit_data
, op
);
3786 LLVMValueRef index
; /* index into the const buffer */
3787 LLVMValueRef scalar
, scalar_ptr
;
3790 index
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 1, 0);
3791 value
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 2, 0);
3793 index
= lp_build_shr_imm(uint_bld
, index
, 2);
3794 index
= lp_build_add(uint_bld
, index
, lp_build_const_int_vec(gallivm
, uint_bld
->type
, emit_data
->chan
));
3796 scalar_ptr
= bld
->ssbos
[buf
];
3798 LLVMValueRef atom_res
= lp_build_alloca(gallivm
,
3799 uint_bld
->vec_type
, "");
3801 LLVMValueRef ssbo_limit
;
3802 ssbo_limit
= LLVMBuildAShr(gallivm
->builder
, bld
->ssbo_sizes
[buf
], lp_build_const_int32(gallivm
, 2), "");
3803 ssbo_limit
= lp_build_broadcast_scalar(uint_bld
, ssbo_limit
);
3805 LLVMValueRef exec_mask
= mask_vec(bld_base
);
3806 LLVMValueRef ssbo_oob_cmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_LESS
, index
, ssbo_limit
);
3807 exec_mask
= LLVMBuildAnd(builder
, exec_mask
, ssbo_oob_cmp
, "");
3809 struct lp_build_loop_state loop_state
;
3810 lp_build_loop_begin(&loop_state
, gallivm
, lp_build_const_int32(gallivm
, 0));
3812 LLVMValueRef value_ptr
= LLVMBuildExtractElement(gallivm
->builder
, value
,
3813 loop_state
.counter
, "");
3814 value_ptr
= LLVMBuildBitCast(gallivm
->builder
, value_ptr
, uint_bld
->elem_type
, "");
3816 index
= LLVMBuildExtractElement(gallivm
->builder
, index
,
3817 loop_state
.counter
, "");
3819 scalar_ptr
= LLVMBuildGEP(builder
, scalar_ptr
,
3822 struct lp_build_if_state ifthen
;
3823 LLVMValueRef cond
, temp_res
;
3825 cond
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
, exec_mask
, uint_bld
->zero
, "");
3826 cond
= LLVMBuildExtractElement(gallivm
->builder
, cond
, loop_state
.counter
, "");
3827 lp_build_if(&ifthen
, gallivm
, cond
);
3829 if (emit_data
->inst
->Instruction
.Opcode
== TGSI_OPCODE_ATOMCAS
) {
3830 LLVMValueRef cas_src
= lp_build_emit_fetch(&bld
->bld_base
, emit_data
->inst
, 3, 0);
3831 LLVMValueRef cas_src_ptr
= LLVMBuildExtractElement(gallivm
->builder
, cas_src
,
3832 loop_state
.counter
, "");
3833 cas_src_ptr
= LLVMBuildBitCast(gallivm
->builder
, cas_src_ptr
, uint_bld
->elem_type
, "");
3834 scalar
= LLVMBuildAtomicCmpXchg(builder
, scalar_ptr
, value_ptr
,
3836 LLVMAtomicOrderingSequentiallyConsistent
,
3837 LLVMAtomicOrderingSequentiallyConsistent
,
3839 scalar
= LLVMBuildExtractValue(gallivm
->builder
, scalar
, 0, "");
3841 scalar
= LLVMBuildAtomicRMW(builder
, op
,
3842 scalar_ptr
, value_ptr
,
3843 LLVMAtomicOrderingSequentiallyConsistent
,
3846 temp_res
= LLVMBuildLoad(builder
, atom_res
, "");
3847 temp_res
= LLVMBuildInsertElement(builder
, temp_res
, scalar
, loop_state
.counter
, "");
3848 LLVMBuildStore(builder
, temp_res
, atom_res
);
3849 lp_build_else(&ifthen
);
3850 temp_res
= LLVMBuildLoad(builder
, atom_res
, "");
3851 temp_res
= LLVMBuildInsertElement(builder
, temp_res
, lp_build_const_int32(gallivm
, 0), loop_state
.counter
, "");
3852 LLVMBuildStore(builder
, temp_res
, atom_res
);
3853 lp_build_endif(&ifthen
);
3855 lp_build_loop_end_cond(&loop_state
, lp_build_const_int32(gallivm
, uint_bld
->type
.length
),
3857 emit_data
->output
[emit_data
->chan
] = LLVMBuildLoad(gallivm
->builder
, atom_res
, "");
3863 const struct lp_build_tgsi_action
* action
,
3864 struct lp_build_tgsi_context
* bld_base
,
3865 struct lp_build_emit_data
* emit_data
)
3867 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3868 LLVMBuildFence(builder
, LLVMAtomicOrderingSequentiallyConsistent
, false, "");
3872 increment_vec_ptr_by_mask(struct lp_build_tgsi_context
* bld_base
,
3876 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3877 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3879 current_vec
= LLVMBuildSub(builder
, current_vec
, mask
, "");
3881 LLVMBuildStore(builder
, current_vec
, ptr
);
3885 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context
* bld_base
,
3889 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
3890 LLVMValueRef current_vec
= LLVMBuildLoad(builder
, ptr
, "");
3892 current_vec
= lp_build_select(&bld_base
->uint_bld
,
3894 bld_base
->uint_bld
.zero
,
3897 LLVMBuildStore(builder
, current_vec
, ptr
);
3901 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context
* bld
,
3902 LLVMValueRef current_mask_vec
,
3903 LLVMValueRef total_emitted_vertices_vec
)
3905 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3906 struct lp_build_context
*int_bld
= &bld
->bld_base
.int_bld
;
3907 LLVMValueRef max_mask
= lp_build_cmp(int_bld
, PIPE_FUNC_LESS
,
3908 total_emitted_vertices_vec
,
3909 bld
->max_output_vertices_vec
);
3911 return LLVMBuildAnd(builder
, current_mask_vec
, max_mask
, "");
3916 const struct lp_build_tgsi_action
* action
,
3917 struct lp_build_tgsi_context
* bld_base
,
3918 struct lp_build_emit_data
* emit_data
)
3920 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3921 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3923 if (bld
->gs_iface
->emit_vertex
) {
3924 LLVMValueRef mask
= mask_vec(bld_base
);
3925 LLVMValueRef total_emitted_vertices_vec
=
3926 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
3927 mask
= clamp_mask_to_max_output_vertices(bld
, mask
,
3928 total_emitted_vertices_vec
);
3929 gather_outputs(bld
);
3930 bld
->gs_iface
->emit_vertex(bld
->gs_iface
, &bld
->bld_base
,
3932 total_emitted_vertices_vec
);
3933 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3935 increment_vec_ptr_by_mask(bld_base
, bld
->total_emitted_vertices_vec_ptr
,
3938 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3939 " +++ emit vertex masked ones = ",
3941 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3942 " +++ emit vertex emitted = ",
3943 total_emitted_vertices_vec
);
3950 end_primitive_masked(struct lp_build_tgsi_context
* bld_base
,
3953 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
3954 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
3956 if (bld
->gs_iface
->end_primitive
) {
3957 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
3958 LLVMValueRef emitted_vertices_vec
=
3959 LLVMBuildLoad(builder
, bld
->emitted_vertices_vec_ptr
, "");
3960 LLVMValueRef emitted_prims_vec
=
3961 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
3963 LLVMValueRef emitted_mask
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
3964 emitted_vertices_vec
,
3966 /* We need to combine the current execution mask with the mask
3967 telling us which, if any, execution slots actually have
3968 unemitted primitives, this way we make sure that end_primitives
3969 executes only on the paths that have unflushed vertices */
3970 mask
= LLVMBuildAnd(builder
, mask
, emitted_mask
, "");
3972 bld
->gs_iface
->end_primitive(bld
->gs_iface
, &bld
->bld_base
,
3973 emitted_vertices_vec
,
3977 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3978 " +++ end prim masked ones = ",
3980 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3981 " +++ end prim emitted verts1 = ",
3982 emitted_vertices_vec
);
3983 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3984 " +++ end prim emitted prims1 = ",
3985 LLVMBuildLoad(builder
,
3986 bld
->emitted_prims_vec_ptr
, ""));
3988 increment_vec_ptr_by_mask(bld_base
, bld
->emitted_prims_vec_ptr
,
3990 clear_uint_vec_ptr_from_mask(bld_base
, bld
->emitted_vertices_vec_ptr
,
3993 lp_build_print_value(bld
->bld_base
.base
.gallivm
,
3994 " +++ end prim emitted verts2 = ",
3995 LLVMBuildLoad(builder
,
3996 bld
->emitted_vertices_vec_ptr
, ""));
4004 const struct lp_build_tgsi_action
* action
,
4005 struct lp_build_tgsi_context
* bld_base
,
4006 struct lp_build_emit_data
* emit_data
)
4008 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4010 if (bld
->gs_iface
->end_primitive
) {
4011 LLVMValueRef mask
= mask_vec(bld_base
);
4012 end_primitive_masked(bld_base
, mask
);
4018 const struct lp_build_tgsi_action
* action
,
4019 struct lp_build_tgsi_context
* bld_base
,
4020 struct lp_build_emit_data
* emit_data
)
4022 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4024 lp_exec_mask_call(&bld
->exec_mask
, emit_data
->inst
->Label
.Label
,
4030 const struct lp_build_tgsi_action
* action
,
4031 struct lp_build_tgsi_context
* bld_base
,
4032 struct lp_build_emit_data
* emit_data
)
4034 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4036 lp_exec_mask_ret(&bld
->exec_mask
, &bld_base
->pc
);
4041 const struct lp_build_tgsi_action
* action
,
4042 struct lp_build_tgsi_context
* bld_base
,
4043 struct lp_build_emit_data
* emit_data
)
4045 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4047 lp_exec_break(&bld
->exec_mask
, bld_base
);
4052 const struct lp_build_tgsi_action
* action
,
4053 struct lp_build_tgsi_context
* bld_base
,
4054 struct lp_build_emit_data
* emit_data
)
4057 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4059 tmp
= lp_build_cmp(&bld_base
->base
, PIPE_FUNC_NOTEQUAL
,
4060 emit_data
->args
[0], bld
->bld_base
.base
.zero
);
4061 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
4066 const struct lp_build_tgsi_action
* action
,
4067 struct lp_build_tgsi_context
* bld_base
,
4068 struct lp_build_emit_data
* emit_data
)
4071 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4072 struct lp_build_context
*uint_bld
= &bld_base
->uint_bld
;
4074 tmp
= lp_build_cmp(uint_bld
, PIPE_FUNC_NOTEQUAL
,
4075 emit_data
->args
[0], uint_bld
->zero
);
4076 lp_exec_mask_cond_push(&bld
->exec_mask
, tmp
);
4081 const struct lp_build_tgsi_action
* action
,
4082 struct lp_build_tgsi_context
* bld_base
,
4083 struct lp_build_emit_data
* emit_data
)
4085 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4087 lp_exec_case(&bld
->exec_mask
, emit_data
->args
[0]);
4092 const struct lp_build_tgsi_action
* action
,
4093 struct lp_build_tgsi_context
* bld_base
,
4094 struct lp_build_emit_data
* emit_data
)
4096 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4098 lp_exec_default(&bld
->exec_mask
, bld_base
);
4103 const struct lp_build_tgsi_action
* action
,
4104 struct lp_build_tgsi_context
* bld_base
,
4105 struct lp_build_emit_data
* emit_data
)
4107 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4109 lp_exec_switch(&bld
->exec_mask
, emit_data
->args
[0]);
4114 const struct lp_build_tgsi_action
* action
,
4115 struct lp_build_tgsi_context
* bld_base
,
4116 struct lp_build_emit_data
* emit_data
)
4118 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4120 lp_exec_endswitch(&bld
->exec_mask
, bld_base
);
4125 const struct lp_build_tgsi_action
* action
,
4126 struct lp_build_tgsi_context
* bld_base
,
4127 struct lp_build_emit_data
* emit_data
)
4129 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4131 lp_exec_bgnloop(&bld
->exec_mask
);
4136 const struct lp_build_tgsi_action
* action
,
4137 struct lp_build_tgsi_context
* bld_base
,
4138 struct lp_build_emit_data
* emit_data
)
4140 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4142 lp_exec_mask_bgnsub(&bld
->exec_mask
);
4147 const struct lp_build_tgsi_action
* action
,
4148 struct lp_build_tgsi_context
* bld_base
,
4149 struct lp_build_emit_data
* emit_data
)
4151 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4153 lp_exec_mask_cond_invert(&bld
->exec_mask
);
4158 const struct lp_build_tgsi_action
* action
,
4159 struct lp_build_tgsi_context
* bld_base
,
4160 struct lp_build_emit_data
* emit_data
)
4162 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4164 lp_exec_mask_cond_pop(&bld
->exec_mask
);
4169 const struct lp_build_tgsi_action
* action
,
4170 struct lp_build_tgsi_context
* bld_base
,
4171 struct lp_build_emit_data
* emit_data
)
4173 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4175 lp_exec_endloop(bld_base
->base
.gallivm
, &bld
->exec_mask
);
4180 const struct lp_build_tgsi_action
* action
,
4181 struct lp_build_tgsi_context
* bld_base
,
4182 struct lp_build_emit_data
* emit_data
)
4184 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4186 lp_exec_mask_endsub(&bld
->exec_mask
, &bld_base
->pc
);
4191 const struct lp_build_tgsi_action
* action
,
4192 struct lp_build_tgsi_context
* bld_base
,
4193 struct lp_build_emit_data
* emit_data
)
4195 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4197 lp_exec_continue(&bld
->exec_mask
);
4200 static void emit_prologue(struct lp_build_tgsi_context
* bld_base
)
4202 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4203 struct gallivm_state
* gallivm
= bld_base
->base
.gallivm
;
4205 if (bld
->indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) {
4206 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] * 4 + 4;
4207 bld
->temps_array
= lp_build_alloca_undef(gallivm
,
4208 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
4212 if (bld
->indirect_files
& (1 << TGSI_FILE_OUTPUT
)) {
4213 LLVMValueRef array_size
=
4214 lp_build_const_int32(gallivm
,
4215 bld_base
->info
->file_max
[TGSI_FILE_OUTPUT
] * 4 + 4);
4216 bld
->outputs_array
= lp_build_array_alloca(gallivm
,
4217 bld_base
->base
.vec_type
, array_size
,
4221 if (bld
->indirect_files
& (1 << TGSI_FILE_IMMEDIATE
)) {
4222 unsigned array_size
= bld_base
->info
->file_max
[TGSI_FILE_IMMEDIATE
] * 4 + 4;
4223 bld
->imms_array
= lp_build_alloca_undef(gallivm
,
4224 LLVMArrayType(bld_base
->base
.vec_type
, array_size
),
4228 /* If we have indirect addressing in inputs we need to copy them into
4229 * our alloca array to be able to iterate over them */
4230 if (bld
->indirect_files
& (1 << TGSI_FILE_INPUT
) && !bld
->gs_iface
) {
4231 unsigned index
, chan
;
4232 LLVMTypeRef vec_type
= bld_base
->base
.vec_type
;
4233 LLVMValueRef array_size
= lp_build_const_int32(gallivm
,
4234 bld_base
->info
->file_max
[TGSI_FILE_INPUT
]*4 + 4);
4235 bld
->inputs_array
= lp_build_array_alloca(gallivm
,
4236 vec_type
, array_size
,
4239 assert(bld_base
->info
->num_inputs
4240 <= bld_base
->info
->file_max
[TGSI_FILE_INPUT
] + 1);
4242 for (index
= 0; index
< bld_base
->info
->num_inputs
; ++index
) {
4243 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; ++chan
) {
4244 LLVMValueRef lindex
=
4245 lp_build_const_int32(gallivm
, index
* 4 + chan
);
4246 LLVMValueRef input_ptr
=
4247 LLVMBuildGEP(gallivm
->builder
, bld
->inputs_array
,
4249 LLVMValueRef value
= bld
->inputs
[index
][chan
];
4251 LLVMBuildStore(gallivm
->builder
, value
, input_ptr
);
4256 if (bld
->gs_iface
) {
4257 struct lp_build_context
*uint_bld
= &bld
->bld_base
.uint_bld
;
4258 bld
->emitted_prims_vec_ptr
=
4259 lp_build_alloca(gallivm
,
4261 "emitted_prims_ptr");
4262 bld
->emitted_vertices_vec_ptr
=
4263 lp_build_alloca(gallivm
,
4265 "emitted_vertices_ptr");
4266 bld
->total_emitted_vertices_vec_ptr
=
4267 lp_build_alloca(gallivm
,
4269 "total_emitted_vertices_ptr");
4271 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
4272 bld
->emitted_prims_vec_ptr
);
4273 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
4274 bld
->emitted_vertices_vec_ptr
);
4275 LLVMBuildStore(gallivm
->builder
, uint_bld
->zero
,
4276 bld
->total_emitted_vertices_vec_ptr
);
4279 if (DEBUG_EXECUTION
) {
4280 lp_build_printf(gallivm
, "\n");
4281 emit_dump_file(bld
, TGSI_FILE_CONSTANT
);
4283 emit_dump_file(bld
, TGSI_FILE_INPUT
);
4287 static void emit_epilogue(struct lp_build_tgsi_context
* bld_base
)
4289 struct lp_build_tgsi_soa_context
* bld
= lp_soa_context(bld_base
);
4290 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
4292 if (DEBUG_EXECUTION
) {
4295 emit_dump_file(bld
, TGSI_FILE_TEMPORARY
);
4297 emit_dump_file(bld
, TGSI_FILE_OUTPUT
);
4298 lp_build_printf(bld_base
->base
.gallivm
, "\n");
4301 /* If we have indirect addressing in outputs we need to copy our alloca array
4302 * to the outputs slots specified by the caller */
4303 if (bld
->gs_iface
) {
4304 LLVMValueRef total_emitted_vertices_vec
;
4305 LLVMValueRef emitted_prims_vec
;
4306 /* implicit end_primitives, needed in case there are any unflushed
4307 vertices in the cache. Note must not call end_primitive here
4308 since the exec_mask is not valid at this point. */
4309 end_primitive_masked(bld_base
, lp_build_mask_value(bld
->mask
));
4311 total_emitted_vertices_vec
=
4312 LLVMBuildLoad(builder
, bld
->total_emitted_vertices_vec_ptr
, "");
4314 LLVMBuildLoad(builder
, bld
->emitted_prims_vec_ptr
, "");
4316 bld
->gs_iface
->gs_epilogue(bld
->gs_iface
,
4318 total_emitted_vertices_vec
,
4321 gather_outputs(bld
);
4326 lp_build_tgsi_soa(struct gallivm_state
*gallivm
,
4327 const struct tgsi_token
*tokens
,
4328 const struct lp_build_tgsi_params
*params
,
4329 LLVMValueRef (*outputs
)[TGSI_NUM_CHANNELS
])
4331 struct lp_build_tgsi_soa_context bld
;
4332 struct lp_type type
= params
->type
;
4333 struct lp_type res_type
;
4335 assert(type
.length
<= LP_MAX_VECTOR_LENGTH
);
4336 memset(&res_type
, 0, sizeof res_type
);
4337 res_type
.width
= type
.width
;
4338 res_type
.length
= type
.length
;
4341 /* Setup build context */
4342 memset(&bld
, 0, sizeof bld
);
4343 lp_build_context_init(&bld
.bld_base
.base
, gallivm
, type
);
4344 lp_build_context_init(&bld
.bld_base
.uint_bld
, gallivm
, lp_uint_type(type
));
4345 lp_build_context_init(&bld
.bld_base
.int_bld
, gallivm
, lp_int_type(type
));
4346 lp_build_context_init(&bld
.elem_bld
, gallivm
, lp_elem_type(type
));
4348 struct lp_type dbl_type
;
4350 dbl_type
.width
*= 2;
4351 lp_build_context_init(&bld
.bld_base
.dbl_bld
, gallivm
, dbl_type
);
4354 struct lp_type uint64_type
;
4355 uint64_type
= lp_uint_type(type
);
4356 uint64_type
.width
*= 2;
4357 lp_build_context_init(&bld
.bld_base
.uint64_bld
, gallivm
, uint64_type
);
4360 struct lp_type int64_type
;
4361 int64_type
= lp_int_type(type
);
4362 int64_type
.width
*= 2;
4363 lp_build_context_init(&bld
.bld_base
.int64_bld
, gallivm
, int64_type
);
4365 bld
.mask
= params
->mask
;
4366 bld
.inputs
= params
->inputs
;
4367 bld
.outputs
= outputs
;
4368 bld
.consts_ptr
= params
->consts_ptr
;
4369 bld
.const_sizes_ptr
= params
->const_sizes_ptr
;
4370 bld
.ssbo_ptr
= params
->ssbo_ptr
;
4371 bld
.ssbo_sizes_ptr
= params
->ssbo_sizes_ptr
;
4372 bld
.sampler
= params
->sampler
;
4373 bld
.bld_base
.info
= params
->info
;
4374 bld
.indirect_files
= params
->info
->indirect_files
;
4375 bld
.context_ptr
= params
->context_ptr
;
4376 bld
.thread_data_ptr
= params
->thread_data_ptr
;
4377 bld
.image
= params
->image
;
4380 * If the number of temporaries is rather large then we just
4381 * allocate them as an array right from the start and treat
4382 * like indirect temporaries.
4384 if (params
->info
->file_max
[TGSI_FILE_TEMPORARY
] >= LP_MAX_INLINED_TEMPS
) {
4385 bld
.indirect_files
|= (1 << TGSI_FILE_TEMPORARY
);
4388 * For performance reason immediates are always backed in a static
4389 * array, but if their number is too great, we have to use just
4390 * a dynamically allocated array.
4392 bld
.use_immediates_array
=
4393 (params
->info
->file_max
[TGSI_FILE_IMMEDIATE
] >= LP_MAX_INLINED_IMMEDIATES
);
4394 if (bld
.use_immediates_array
) {
4395 bld
.indirect_files
|= (1 << TGSI_FILE_IMMEDIATE
);
4399 bld
.bld_base
.soa
= TRUE
;
4400 bld
.bld_base
.emit_debug
= emit_debug
;
4401 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_CONSTANT
] = emit_fetch_constant
;
4402 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_IMMEDIATE
] = emit_fetch_immediate
;
4403 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_input
;
4404 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_TEMPORARY
] = emit_fetch_temporary
;
4405 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_SYSTEM_VALUE
] = emit_fetch_system_value
;
4406 bld
.bld_base
.emit_store
= emit_store
;
4408 bld
.bld_base
.emit_declaration
= lp_emit_declaration_soa
;
4409 bld
.bld_base
.emit_immediate
= lp_emit_immediate_soa
;
4411 bld
.bld_base
.emit_prologue
= emit_prologue
;
4412 bld
.bld_base
.emit_epilogue
= emit_epilogue
;
4414 /* Set opcode actions */
4415 lp_set_default_actions_cpu(&bld
.bld_base
);
4417 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNLOOP
].emit
= bgnloop_emit
;
4418 bld
.bld_base
.op_actions
[TGSI_OPCODE_BGNSUB
].emit
= bgnsub_emit
;
4419 bld
.bld_base
.op_actions
[TGSI_OPCODE_BRK
].emit
= brk_emit
;
4420 bld
.bld_base
.op_actions
[TGSI_OPCODE_CAL
].emit
= cal_emit
;
4421 bld
.bld_base
.op_actions
[TGSI_OPCODE_CASE
].emit
= case_emit
;
4422 bld
.bld_base
.op_actions
[TGSI_OPCODE_CONT
].emit
= cont_emit
;
4423 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDX
].emit
= ddx_emit
;
4424 bld
.bld_base
.op_actions
[TGSI_OPCODE_DDY
].emit
= ddy_emit
;
4425 bld
.bld_base
.op_actions
[TGSI_OPCODE_DEFAULT
].emit
= default_emit
;
4426 bld
.bld_base
.op_actions
[TGSI_OPCODE_ELSE
].emit
= else_emit
;
4427 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDIF
].emit
= endif_emit
;
4428 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDLOOP
].emit
= endloop_emit
;
4429 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSUB
].emit
= endsub_emit
;
4430 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDSWITCH
].emit
= endswitch_emit
;
4431 bld
.bld_base
.op_actions
[TGSI_OPCODE_IF
].emit
= if_emit
;
4432 bld
.bld_base
.op_actions
[TGSI_OPCODE_UIF
].emit
= uif_emit
;
4433 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL_IF
].emit
= kill_if_emit
;
4434 bld
.bld_base
.op_actions
[TGSI_OPCODE_KILL
].emit
= kill_emit
;
4435 bld
.bld_base
.op_actions
[TGSI_OPCODE_RET
].emit
= ret_emit
;
4436 bld
.bld_base
.op_actions
[TGSI_OPCODE_SWITCH
].emit
= switch_emit
;
4437 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX
].emit
= tex_emit
;
4438 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB
].emit
= txb_emit
;
4439 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXD
].emit
= txd_emit
;
4440 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL
].emit
= txl_emit
;
4441 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXP
].emit
= txp_emit
;
4442 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXQ
].emit
= txq_emit
;
4443 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXF
].emit
= txf_emit
;
4444 bld
.bld_base
.op_actions
[TGSI_OPCODE_TEX2
].emit
= tex2_emit
;
4445 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXB2
].emit
= txb2_emit
;
4446 bld
.bld_base
.op_actions
[TGSI_OPCODE_TXL2
].emit
= txl2_emit
;
4447 bld
.bld_base
.op_actions
[TGSI_OPCODE_TG4
].emit
= tg4_emit
;
4448 bld
.bld_base
.op_actions
[TGSI_OPCODE_LODQ
].emit
= lodq_emit
;
4449 /* DX10 sampling ops */
4450 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE
].emit
= sample_emit
;
4451 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_B
].emit
= sample_b_emit
;
4452 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C
].emit
= sample_c_emit
;
4453 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_C_LZ
].emit
= sample_c_lz_emit
;
4454 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_D
].emit
= sample_d_emit
;
4455 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I
].emit
= sample_i_emit
;
4456 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_I_MS
].emit
= sample_i_emit
;
4457 bld
.bld_base
.op_actions
[TGSI_OPCODE_SAMPLE_L
].emit
= sample_l_emit
;
4458 bld
.bld_base
.op_actions
[TGSI_OPCODE_GATHER4
].emit
= gather4_emit
;
4459 bld
.bld_base
.op_actions
[TGSI_OPCODE_SVIEWINFO
].emit
= sviewinfo_emit
;
4460 bld
.bld_base
.op_actions
[TGSI_OPCODE_LOD
].emit
= lod_emit
;
4462 bld
.bld_base
.op_actions
[TGSI_OPCODE_LOAD
].emit
= load_emit
;
4463 bld
.bld_base
.op_actions
[TGSI_OPCODE_STORE
].emit
= store_emit
;
4464 bld
.bld_base
.op_actions
[TGSI_OPCODE_RESQ
].emit
= resq_emit
;
4466 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMUADD
].emit
= atomic_emit
;
4467 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMXCHG
].emit
= atomic_emit
;
4468 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMCAS
].emit
= atomic_emit
;
4469 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMAND
].emit
= atomic_emit
;
4470 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMOR
].emit
= atomic_emit
;
4471 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMXOR
].emit
= atomic_emit
;
4472 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMUMIN
].emit
= atomic_emit
;
4473 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMUMAX
].emit
= atomic_emit
;
4474 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMIMIN
].emit
= atomic_emit
;
4475 bld
.bld_base
.op_actions
[TGSI_OPCODE_ATOMIMAX
].emit
= atomic_emit
;
4477 bld
.bld_base
.op_actions
[TGSI_OPCODE_MEMBAR
].emit
= membar_emit
;
4478 if (params
->gs_iface
) {
4479 /* There's no specific value for this because it should always
4480 * be set, but apps using ext_geometry_shader4 quite often
4481 * were forgetting so we're using MAX_VERTEX_VARYING from
4482 * that spec even though we could debug_assert if it's not
4483 * set, but that's a lot uglier. */
4484 uint max_output_vertices
;
4486 /* inputs are always indirect with gs */
4487 bld
.indirect_files
|= (1 << TGSI_FILE_INPUT
);
4488 bld
.gs_iface
= params
->gs_iface
;
4489 bld
.bld_base
.emit_fetch_funcs
[TGSI_FILE_INPUT
] = emit_fetch_gs_input
;
4490 bld
.bld_base
.op_actions
[TGSI_OPCODE_EMIT
].emit
= emit_vertex
;
4491 bld
.bld_base
.op_actions
[TGSI_OPCODE_ENDPRIM
].emit
= end_primitive
;
4493 max_output_vertices
=
4494 params
->info
->properties
[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
];
4495 if (!max_output_vertices
)
4496 max_output_vertices
= 32;
4498 bld
.max_output_vertices_vec
=
4499 lp_build_const_int_vec(gallivm
, bld
.bld_base
.int_bld
.type
,
4500 max_output_vertices
);
4503 lp_exec_mask_init(&bld
.exec_mask
, &bld
.bld_base
.int_bld
);
4505 bld
.system_values
= *params
->system_values
;
4507 lp_build_tgsi_llvm(&bld
.bld_base
, tokens
);
4510 LLVMBasicBlockRef block
= LLVMGetInsertBlock(gallivm
->builder
);
4511 LLVMValueRef function
= LLVMGetBasicBlockParent(block
);
4512 debug_printf("11111111111111111111111111111 \n");
4513 tgsi_dump(tokens
, 0);
4514 lp_debug_dump_value(function
);
4515 debug_printf("2222222222222222222222222222 \n");
4519 LLVMModuleRef module
= LLVMGetGlobalParent(
4520 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm
->builder
)));
4521 LLVMDumpModule(module
);
4524 lp_exec_mask_fini(&bld
.exec_mask
);