gallivm, tgsi: provide fake sample_i_ms implementations
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_tgsi_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "tgsi/tgsi_strings.h"
51 #include "lp_bld_tgsi_action.h"
52 #include "lp_bld_type.h"
53 #include "lp_bld_const.h"
54 #include "lp_bld_arit.h"
55 #include "lp_bld_bitarit.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_init.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_swizzle.h"
60 #include "lp_bld_flow.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_tgsi.h"
63 #include "lp_bld_limits.h"
64 #include "lp_bld_debug.h"
65 #include "lp_bld_printf.h"
66 #include "lp_bld_sample.h"
67 #include "lp_bld_struct.h"
68
69 /* SM 4.0 says that subroutines can nest 32 deep and
70 * we need one more for our main function */
71 #define LP_MAX_NUM_FUNCS 33
72
73 #define DUMP_GS_EMITS 0
74
75 /*
76  * If non-zero, the generated LLVM IR will print intermediate results for every
77  * TGSI instruction.
78 *
79 * TODO:
80  * - take execution masks into consideration
81 * - debug control-flow instructions
82 */
83 #define DEBUG_EXECUTION 0
84
85
86 /*
87 * Emit code to print a register value.
88 */
89 static void
90 emit_dump_reg(struct gallivm_state *gallivm,
91 unsigned file,
92 unsigned index,
93 unsigned chan,
94 LLVMValueRef value)
95 {
96 char buf[32];
97
98 util_snprintf(buf, sizeof buf, " %s[%u].%c = ",
99 tgsi_file_name(file),
100 index, "xyzw"[chan]);
101
102 lp_build_print_value(gallivm, buf, value);
103 }
104
105 /*
106 * Return the context for the current function.
107  * (always 'main' if the shader doesn't make any function calls)
108 */
109 static inline struct function_ctx *
110 func_ctx(struct lp_exec_mask *mask)
111 {
112 assert(mask->function_stack_size > 0);
113 assert(mask->function_stack_size <= LP_MAX_NUM_FUNCS);
114 return &mask->function_stack[mask->function_stack_size - 1];
115 }
116
117 /*
118 * Returns true if we're in a loop.
119  * The check is global: it returns true even if there's no loop in the
120  * current function, as long as we were inside a loop in one of the
121  * functions from which this one was called.
122 */
123 static inline boolean
124 mask_has_loop(struct lp_exec_mask *mask)
125 {
126 int i;
127 for (i = mask->function_stack_size - 1; i >= 0; --i) {
128 const struct function_ctx *ctx = &mask->function_stack[i];
129 if (ctx->loop_stack_size > 0)
130 return TRUE;
131 }
132 return FALSE;
133 }
134
135 /*
136 * Returns true if we're inside a switch statement.
137  * The check is global: it returns true even if there's no switch in the
138  * current function, as long as we were inside a switch in one of the
139  * functions from which this one was called.
140 */
141 static inline boolean
142 mask_has_switch(struct lp_exec_mask *mask)
143 {
144 int i;
145 for (i = mask->function_stack_size - 1; i >= 0; --i) {
146 const struct function_ctx *ctx = &mask->function_stack[i];
147 if (ctx->switch_stack_size > 0)
148 return TRUE;
149 }
150 return FALSE;
151 }
152
153 /*
154 * Returns true if we're inside a conditional.
155  * The check is global: it returns true even if there's no conditional in the
156  * current function, as long as we were inside a conditional in one of the
157  * functions from which this one was called.
158 */
159 static inline boolean
160 mask_has_cond(struct lp_exec_mask *mask)
161 {
162 int i;
163 for (i = mask->function_stack_size - 1; i >= 0; --i) {
164 const struct function_ctx *ctx = &mask->function_stack[i];
165 if (ctx->cond_stack_size > 0)
166 return TRUE;
167 }
168 return FALSE;
169 }
170
171
172 /*
173 * Initialize a function context at the specified index.
174 */
175 static void
176 lp_exec_mask_function_init(struct lp_exec_mask *mask, int function_idx)
177 {
178 LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
179 LLVMBuilderRef builder = mask->bld->gallivm->builder;
180 struct function_ctx *ctx = &mask->function_stack[function_idx];
181
182 ctx->cond_stack_size = 0;
183 ctx->loop_stack_size = 0;
184 ctx->switch_stack_size = 0;
185
186 if (function_idx == 0) {
187 ctx->ret_mask = mask->ret_mask;
188 }
189
190 ctx->loop_limiter = lp_build_alloca(mask->bld->gallivm,
191 int_type, "looplimiter");
192 LLVMBuildStore(
193 builder,
194 LLVMConstInt(int_type, LP_MAX_TGSI_LOOP_ITERATIONS, false),
195 ctx->loop_limiter);
196 }
197
198 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
199 {
200 mask->bld = bld;
201 mask->has_mask = FALSE;
202 mask->ret_in_main = FALSE;
203 /* For the main function */
204 mask->function_stack_size = 1;
205
206 mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
207 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask =
208 mask->cond_mask = mask->switch_mask =
209 LLVMConstAllOnes(mask->int_vec_type);
210
211 mask->function_stack = CALLOC(LP_MAX_NUM_FUNCS,
212 sizeof(mask->function_stack[0]));
213 lp_exec_mask_function_init(mask, 0);
214 }
215
216 static void
217 lp_exec_mask_fini(struct lp_exec_mask *mask)
218 {
219 FREE(mask->function_stack);
220 }
221
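/*
 * Recompute exec_mask from the individual control-flow masks.
 * Conceptually, the generated IR computes
 *
 *    exec_mask = cond_mask & cont_mask & break_mask & switch_mask & ret_mask
 *
 * except that each term is only AND'ed in when the corresponding kind of
 * control flow is actually in effect, so straight-line code pays nothing.
 */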
222 static void lp_exec_mask_update(struct lp_exec_mask *mask)
223 {
224 LLVMBuilderRef builder = mask->bld->gallivm->builder;
225 boolean has_loop_mask = mask_has_loop(mask);
226 boolean has_cond_mask = mask_has_cond(mask);
227 boolean has_switch_mask = mask_has_switch(mask);
228 boolean has_ret_mask = mask->function_stack_size > 1 ||
229 mask->ret_in_main;
230
231 if (has_loop_mask) {
232 /* For loops we need to update the entire mask at runtime. */
233 LLVMValueRef tmp;
234 assert(mask->break_mask);
235 tmp = LLVMBuildAnd(builder,
236 mask->cont_mask,
237 mask->break_mask,
238 "maskcb");
239 mask->exec_mask = LLVMBuildAnd(builder,
240 mask->cond_mask,
241 tmp,
242 "maskfull");
243 } else
244 mask->exec_mask = mask->cond_mask;
245
246 if (has_switch_mask) {
247 mask->exec_mask = LLVMBuildAnd(builder,
248 mask->exec_mask,
249 mask->switch_mask,
250 "switchmask");
251 }
252
253 if (has_ret_mask) {
254 mask->exec_mask = LLVMBuildAnd(builder,
255 mask->exec_mask,
256 mask->ret_mask,
257 "callmask");
258 }
259
260 mask->has_mask = (has_cond_mask ||
261 has_loop_mask ||
262 has_switch_mask ||
263 has_ret_mask);
264 }
265
266 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
267 LLVMValueRef val)
268 {
269 LLVMBuilderRef builder = mask->bld->gallivm->builder;
270 struct function_ctx *ctx = func_ctx(mask);
271
272 if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING) {
273 ctx->cond_stack_size++;
274 return;
275 }
276 if (ctx->cond_stack_size == 0 && mask->function_stack_size == 1) {
277 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
278 }
279 ctx->cond_stack[ctx->cond_stack_size++] = mask->cond_mask;
280 assert(LLVMTypeOf(val) == mask->int_vec_type);
281 mask->cond_mask = LLVMBuildAnd(builder,
282 mask->cond_mask,
283 val,
284 "");
285 lp_exec_mask_update(mask);
286 }
287
288 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
289 {
290 LLVMBuilderRef builder = mask->bld->gallivm->builder;
291 struct function_ctx *ctx = func_ctx(mask);
292 LLVMValueRef prev_mask;
293 LLVMValueRef inv_mask;
294
295 assert(ctx->cond_stack_size);
296 if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
297 return;
298 prev_mask = ctx->cond_stack[ctx->cond_stack_size - 1];
299 if (ctx->cond_stack_size == 1 && mask->function_stack_size == 1) {
300 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
301 }
302
303 inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
304
305 mask->cond_mask = LLVMBuildAnd(builder,
306 inv_mask,
307 prev_mask, "");
308 lp_exec_mask_update(mask);
309 }
310
311 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
312 {
313 struct function_ctx *ctx = func_ctx(mask);
314 assert(ctx->cond_stack_size);
315 --ctx->cond_stack_size;
316 if (ctx->cond_stack_size >= LP_MAX_TGSI_NESTING)
317 return;
318 mask->cond_mask = ctx->cond_stack[ctx->cond_stack_size];
319 lp_exec_mask_update(mask);
320 }
321
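/*
 * Enter a TGSI loop: save the enclosing loop's state on the loop stack,
 * allocate a fresh break_var (the break mask must survive the loop's back
 * edge, so it lives in memory), and open a new basic block for the loop
 * body that the matching ENDLOOP branches back to.
 */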
322 static void lp_exec_bgnloop(struct lp_exec_mask *mask)
323 {
324 LLVMBuilderRef builder = mask->bld->gallivm->builder;
325 struct function_ctx *ctx = func_ctx(mask);
326
327 if (ctx->loop_stack_size >= LP_MAX_TGSI_NESTING) {
328 ++ctx->loop_stack_size;
329 return;
330 }
331
332 ctx->break_type_stack[ctx->loop_stack_size + ctx->switch_stack_size] =
333 ctx->break_type;
334 ctx->break_type = LP_EXEC_MASK_BREAK_TYPE_LOOP;
335
336 ctx->loop_stack[ctx->loop_stack_size].loop_block = ctx->loop_block;
337 ctx->loop_stack[ctx->loop_stack_size].cont_mask = mask->cont_mask;
338 ctx->loop_stack[ctx->loop_stack_size].break_mask = mask->break_mask;
339 ctx->loop_stack[ctx->loop_stack_size].break_var = ctx->break_var;
340 ++ctx->loop_stack_size;
341
342 ctx->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
343 LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
344
345 ctx->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");
346
347 LLVMBuildBr(builder, ctx->loop_block);
348 LLVMPositionBuilderAtEnd(builder, ctx->loop_block);
349
350 mask->break_mask = LLVMBuildLoad(builder, ctx->break_var, "");
351
352 lp_exec_mask_update(mask);
353 }
354
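/*
 * Execute a TGSI BRK. Inside a loop the currently active channels are
 * removed from break_mask; inside a switch they are removed from
 * switch_mask instead (or switch_mask is simply cleared if the break is
 * unconditional). The channels stay dead until the matching end statement.
 */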
355 static void lp_exec_break(struct lp_exec_mask *mask,
356 struct lp_build_tgsi_context * bld_base)
357 {
358 LLVMBuilderRef builder = mask->bld->gallivm->builder;
359 struct function_ctx *ctx = func_ctx(mask);
360
361 if (ctx->break_type == LP_EXEC_MASK_BREAK_TYPE_LOOP) {
362 LLVMValueRef exec_mask = LLVMBuildNot(builder,
363 mask->exec_mask,
364 "break");
365
366 mask->break_mask = LLVMBuildAnd(builder,
367 mask->break_mask,
368 exec_mask, "break_full");
369 }
370 else {
371 unsigned opcode = bld_base->instructions[bld_base->pc + 1].Instruction.Opcode;
372 boolean break_always = (opcode == TGSI_OPCODE_ENDSWITCH ||
373 opcode == TGSI_OPCODE_CASE);
374
375
376 if (ctx->switch_in_default) {
377 /*
378  * Stop default execution, but only if this is an unconditional break.
379  * (The condition here is not perfect since dead code after a break is
380  * allowed but should be sufficient, since false negatives are merely
381  * unoptimized - so we don't have to pre-evaluate that).
382 */
383 if (break_always && ctx->switch_pc) {
384 bld_base->pc = ctx->switch_pc;
385 return;
386 }
387 }
388
389 if (break_always) {
390 mask->switch_mask = LLVMConstNull(mask->bld->int_vec_type);
391 }
392 else {
393 LLVMValueRef exec_mask = LLVMBuildNot(builder,
394 mask->exec_mask,
395 "break");
396 mask->switch_mask = LLVMBuildAnd(builder,
397 mask->switch_mask,
398 exec_mask, "break_switch");
399 }
400 }
401
402 lp_exec_mask_update(mask);
403 }
404
405 static void lp_exec_break_condition(struct lp_exec_mask *mask,
406 LLVMValueRef cond)
407 {
408 LLVMBuilderRef builder = mask->bld->gallivm->builder;
409 struct function_ctx *ctx = func_ctx(mask);
410 LLVMValueRef cond_mask = LLVMBuildAnd(builder,
411 mask->exec_mask,
412 cond, "cond_mask");
413 cond_mask = LLVMBuildNot(builder, cond_mask, "break_cond");
414
415 if (ctx->break_type == LP_EXEC_MASK_BREAK_TYPE_LOOP) {
416 mask->break_mask = LLVMBuildAnd(builder,
417 mask->break_mask,
418 cond_mask, "breakc_full");
419 }
420 else {
421 mask->switch_mask = LLVMBuildAnd(builder,
422 mask->switch_mask,
423 cond_mask, "breakc_switch");
424 }
425
426 lp_exec_mask_update(mask);
427 }
428
429 static void lp_exec_continue(struct lp_exec_mask *mask)
430 {
431 LLVMBuilderRef builder = mask->bld->gallivm->builder;
432 LLVMValueRef exec_mask = LLVMBuildNot(builder,
433 mask->exec_mask,
434 "");
435
436 mask->cont_mask = LLVMBuildAnd(builder,
437 mask->cont_mask,
438 exec_mask, "");
439
440 lp_exec_mask_update(mask);
441 }
442
443
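/*
 * Close a TGSI loop: restore the continue mask, persist the break mask
 * through break_var, then branch back to the loop header as long as any
 * channel is still active and the iteration limiter hasn't run out.
 */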
444 static void lp_exec_endloop(struct gallivm_state *gallivm,
445 struct lp_exec_mask *mask)
446 {
447 LLVMBuilderRef builder = mask->bld->gallivm->builder;
448 struct function_ctx *ctx = func_ctx(mask);
449 LLVMBasicBlockRef endloop;
450 LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
451 LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
452 mask->bld->type.width *
453 mask->bld->type.length);
454 LLVMValueRef i1cond, i2cond, icond, limiter;
455
456 assert(mask->break_mask);
457
458
459 assert(ctx->loop_stack_size);
460 if (ctx->loop_stack_size > LP_MAX_TGSI_NESTING) {
461 --ctx->loop_stack_size;
462 return;
463 }
464
465 /*
466 * Restore the cont_mask, but don't pop
467 */
468 mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size - 1].cont_mask;
469 lp_exec_mask_update(mask);
470
471 /*
472 * Unlike the continue mask, the break_mask must be preserved across loop
473 * iterations
474 */
475 LLVMBuildStore(builder, mask->break_mask, ctx->break_var);
476
477 /* Decrement the loop limiter */
478 limiter = LLVMBuildLoad(builder, ctx->loop_limiter, "");
479
480 limiter = LLVMBuildSub(
481 builder,
482 limiter,
483 LLVMConstInt(int_type, 1, false),
484 "");
485
486 LLVMBuildStore(builder, limiter, ctx->loop_limiter);
487
488 /* i1cond = (mask != 0) */
489 i1cond = LLVMBuildICmp(
490 builder,
491 LLVMIntNE,
492 LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
493 LLVMConstNull(reg_type), "i1cond");
494
495 /* i2cond = (looplimiter > 0) */
496 i2cond = LLVMBuildICmp(
497 builder,
498 LLVMIntSGT,
499 limiter,
500 LLVMConstNull(int_type), "i2cond");
501
502 /* if( i1cond && i2cond ) */
503 icond = LLVMBuildAnd(builder, i1cond, i2cond, "");
504
505 endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");
506
507 LLVMBuildCondBr(builder,
508 icond, ctx->loop_block, endloop);
509
510 LLVMPositionBuilderAtEnd(builder, endloop);
511
512 assert(ctx->loop_stack_size);
513 --ctx->loop_stack_size;
514 mask->cont_mask = ctx->loop_stack[ctx->loop_stack_size].cont_mask;
515 mask->break_mask = ctx->loop_stack[ctx->loop_stack_size].break_mask;
516 ctx->loop_block = ctx->loop_stack[ctx->loop_stack_size].loop_block;
517 ctx->break_var = ctx->loop_stack[ctx->loop_stack_size].break_var;
518 ctx->break_type = ctx->break_type_stack[ctx->loop_stack_size +
519 ctx->switch_stack_size];
520
521 lp_exec_mask_update(mask);
522 }
523
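/*
 * Enter a TGSI switch. Since all channels have to visit every reachable
 * case anyway, the switch is lowered to masking: switch_mask starts out
 * all-zero, each CASE/DEFAULT turns matching channels on, and BRK turns
 * them off again.
 */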
524 static void lp_exec_switch(struct lp_exec_mask *mask,
525 LLVMValueRef switchval)
526 {
527 struct function_ctx *ctx = func_ctx(mask);
528
529 if (ctx->switch_stack_size >= LP_MAX_TGSI_NESTING ||
530 ctx->loop_stack_size > LP_MAX_TGSI_NESTING) {
531 ctx->switch_stack_size++;
532 return;
533 }
534
535 ctx->break_type_stack[ctx->loop_stack_size + ctx->switch_stack_size] =
536 ctx->break_type;
537 ctx->break_type = LP_EXEC_MASK_BREAK_TYPE_SWITCH;
538
539 ctx->switch_stack[ctx->switch_stack_size].switch_mask = mask->switch_mask;
540 ctx->switch_stack[ctx->switch_stack_size].switch_val = ctx->switch_val;
541 ctx->switch_stack[ctx->switch_stack_size].switch_mask_default = ctx->switch_mask_default;
542 ctx->switch_stack[ctx->switch_stack_size].switch_in_default = ctx->switch_in_default;
543 ctx->switch_stack[ctx->switch_stack_size].switch_pc = ctx->switch_pc;
544 ctx->switch_stack_size++;
545
546 mask->switch_mask = LLVMConstNull(mask->int_vec_type);
547 ctx->switch_val = switchval;
548 ctx->switch_mask_default = LLVMConstNull(mask->int_vec_type);
549 ctx->switch_in_default = false;
550 ctx->switch_pc = 0;
551
552 lp_exec_mask_update(mask);
553 }
554
555 static void lp_exec_endswitch(struct lp_exec_mask *mask,
556 struct lp_build_tgsi_context * bld_base)
557 {
558 LLVMBuilderRef builder = mask->bld->gallivm->builder;
559 struct function_ctx *ctx = func_ctx(mask);
560
561 if (ctx->switch_stack_size > LP_MAX_TGSI_NESTING) {
562 ctx->switch_stack_size--;
563 return;
564 }
565
566 /* Check if there's a deferred default; if so, do it now. */
567 if (ctx->switch_pc && !ctx->switch_in_default) {
568 LLVMValueRef prevmask, defaultmask;
569 unsigned tmp_pc;
570 prevmask = ctx->switch_stack[ctx->switch_stack_size - 1].switch_mask;
571 defaultmask = LLVMBuildNot(builder, ctx->switch_mask_default, "sw_default_mask");
572 mask->switch_mask = LLVMBuildAnd(builder, prevmask, defaultmask, "sw_mask");
573 ctx->switch_in_default = true;
574
575 lp_exec_mask_update(mask);
576
577 assert(bld_base->instructions[ctx->switch_pc - 1].Instruction.Opcode ==
578 TGSI_OPCODE_DEFAULT);
579
580 tmp_pc = bld_base->pc;
581 bld_base->pc = ctx->switch_pc;
582 /*
583  * Re-purpose switch_pc to point back here, since we stop execution of
584  * the deferred default after the next break.
585 */
586 ctx->switch_pc = tmp_pc - 1;
587
588 return;
589 }
590
591 else if (ctx->switch_pc && ctx->switch_in_default) {
592 assert(bld_base->pc == ctx->switch_pc + 1);
593 }
594
595 ctx->switch_stack_size--;
596 mask->switch_mask = ctx->switch_stack[ctx->switch_stack_size].switch_mask;
597 ctx->switch_val = ctx->switch_stack[ctx->switch_stack_size].switch_val;
598 ctx->switch_mask_default = ctx->switch_stack[ctx->switch_stack_size].switch_mask_default;
599 ctx->switch_in_default = ctx->switch_stack[ctx->switch_stack_size].switch_in_default;
600 ctx->switch_pc = ctx->switch_stack[ctx->switch_stack_size].switch_pc;
601
602 ctx->break_type = ctx->break_type_stack[ctx->loop_stack_size + ctx->switch_stack_size];
603
604 lp_exec_mask_update(mask);
605 }
606
607 static void lp_exec_case(struct lp_exec_mask *mask,
608 LLVMValueRef caseval)
609 {
610 LLVMBuilderRef builder = mask->bld->gallivm->builder;
611 struct function_ctx *ctx = func_ctx(mask);
612
613 LLVMValueRef casemask, prevmask;
614
615 if (ctx->switch_stack_size > LP_MAX_TGSI_NESTING) {
616 return;
617 }
618
619 /* skipping case mask evaluation here is NOT optional (not in all cases anyway). */
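/* Even when every channel already entered through an earlier case
 * (fallthrough), the comparison must still run so switch_mask_default
 * keeps accumulating all case-matching channels; the default mask is
 * later derived as its complement. */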
620 if (!ctx->switch_in_default) {
621 prevmask = ctx->switch_stack[ctx->switch_stack_size - 1].switch_mask;
622 casemask = lp_build_cmp(mask->bld, PIPE_FUNC_EQUAL, caseval, ctx->switch_val);
623 ctx->switch_mask_default = LLVMBuildOr(builder, casemask,
624 ctx->switch_mask_default, "sw_default_mask");
625 casemask = LLVMBuildOr(builder, casemask, mask->switch_mask, "");
626 mask->switch_mask = LLVMBuildAnd(builder, casemask, prevmask, "sw_mask");
627
628 lp_exec_mask_update(mask);
629 }
630 }
631
632 /*
633  * Analyse the default statement in a switch.
634  * \return true if default is the last statement, false otherwise
635 * \param default_pc_start contains pc of instruction to jump to
636 * if default wasn't last but there's no
637 * fallthrough into default.
638 */
639 static boolean default_analyse_is_last(struct lp_exec_mask *mask,
640 struct lp_build_tgsi_context * bld_base,
641 int *default_pc_start)
642 {
643 unsigned pc = bld_base->pc;
644 struct function_ctx *ctx = func_ctx(mask);
645 unsigned curr_switch_stack = ctx->switch_stack_size;
646
647 if (ctx->switch_stack_size > LP_MAX_TGSI_NESTING) {
648 return false;
649 }
650
651 /* skip over case statements which are together with default */
652 while (bld_base->instructions[pc].Instruction.Opcode == TGSI_OPCODE_CASE) {
653 pc++;
654 }
655
656 while (pc != -1 && pc < bld_base->num_instructions) {
657 unsigned opcode = bld_base->instructions[pc].Instruction.Opcode;
658 switch (opcode) {
659 case TGSI_OPCODE_CASE:
660 if (curr_switch_stack == ctx->switch_stack_size) {
661 *default_pc_start = pc - 1;
662 return false;
663 }
664 break;
665 case TGSI_OPCODE_SWITCH:
666 curr_switch_stack++;
667 break;
668 case TGSI_OPCODE_ENDSWITCH:
669 if (curr_switch_stack == ctx->switch_stack_size) {
670 *default_pc_start = pc - 1;
671 return true;
672 }
673 curr_switch_stack--;
674 break;
675 }
676 pc++;
677 }
678 /* should never arrive here */
679 assert(0);
680 return true;
681 }
682
683 static void lp_exec_default(struct lp_exec_mask *mask,
684 struct lp_build_tgsi_context * bld_base)
685 {
686 LLVMBuilderRef builder = mask->bld->gallivm->builder;
687 struct function_ctx *ctx = func_ctx(mask);
688
689 int default_exec_pc;
690 boolean default_is_last;
691
692 if (ctx->switch_stack_size > LP_MAX_TGSI_NESTING) {
693 return;
694 }
695
696 /*
697  * This is a messy opcode, because it may not always be at the end and
698 * there can be fallthrough in and out of it.
699 */
700
701 default_is_last = default_analyse_is_last(mask, bld_base, &default_exec_pc);
702 /*
703  * If it is the last statement in the switch (note that case statements
704  * appearing "at the same time" as default don't change that) everything is
705  * just fine: update the switch mask and go on. This means we can handle
706  * default with fallthrough INTO it without overhead, if it is last.
707 */
708 if (default_is_last) {
709 LLVMValueRef prevmask, defaultmask;
710 prevmask = ctx->switch_stack[ctx->switch_stack_size - 1].switch_mask;
711 defaultmask = LLVMBuildNot(builder, ctx->switch_mask_default, "sw_default_mask");
712 defaultmask = LLVMBuildOr(builder, defaultmask, mask->switch_mask, "");
713 mask->switch_mask = LLVMBuildAnd(builder, prevmask, defaultmask, "sw_mask");
714 ctx->switch_in_default = true;
715
716 lp_exec_mask_update(mask);
717 }
718 else {
719 /*
720  * Technically, a "case" immediately before default isn't really a
721  * fallthrough, however we still have to count it as such, as we
722  * have already updated the masks.
723  * If that happens in practice, we could add a switch optimizer pass
724  * which just gets rid of all case statements appearing together with
725  * default (or do the switch analysis at switch start time instead).
726 */
727 unsigned opcode = bld_base->instructions[bld_base->pc - 1].Instruction.Opcode;
728 boolean ft_into = (opcode != TGSI_OPCODE_BRK &&
729 opcode != TGSI_OPCODE_SWITCH);
730 /*
731 * If it is not last statement and there was no fallthrough into it,
732 * we record the PC and continue execution at next case (again, those
733 * case encountered at the same time don't count). At endswitch
734 * time, we update switchmask, and go back executing the code we skipped
735 * until the next break (possibly re-executing some code with changed mask
736 * if there was a fallthrough out of default).
737 * Finally, if it is not last statement and there was a fallthrough into it,
738 * do the same as with the former case, except instead of skipping the code
739 * just execute it without updating the mask, then go back and re-execute.
740 */
741 ctx->switch_pc = bld_base->pc;
742 if (!ft_into) {
743 bld_base->pc = default_exec_pc;
744 }
745 }
746 }
747
748
749 /* Stores val at the address pointed to by dst_ptr.
750  * mask->exec_mask is used to figure out which channels of val
751  * should be stored into the address
752  * (0 means don't store this channel, ~0 means do store).
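 * The store is emitted branch-free as a read-modify-write select, roughly:
 *
 *    dst = load dst_ptr
 *    store select(mask, val, dst), dst_ptr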
753 */
754 static void lp_exec_mask_store(struct lp_exec_mask *mask,
755 struct lp_build_context *bld_store,
756 LLVMValueRef pred,
757 LLVMValueRef val,
758 LLVMValueRef dst_ptr)
759 {
760 LLVMBuilderRef builder = mask->bld->gallivm->builder;
761
762 assert(lp_check_value(bld_store->type, val));
763 assert(LLVMGetTypeKind(LLVMTypeOf(dst_ptr)) == LLVMPointerTypeKind);
764 assert(LLVMGetElementType(LLVMTypeOf(dst_ptr)) == LLVMTypeOf(val));
765
766 /* Mix the predicate and execution mask */
767 if (mask->has_mask) {
768 if (pred) {
769 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
770 } else {
771 pred = mask->exec_mask;
772 }
773 }
774
775 if (pred) {
776 LLVMValueRef res, dst;
777
778 dst = LLVMBuildLoad(builder, dst_ptr, "");
779 res = lp_build_select(bld_store, pred, val, dst);
780 LLVMBuildStore(builder, res, dst_ptr);
781 } else
782 LLVMBuildStore(builder, val, dst_ptr);
783 }
784
785 static void lp_exec_mask_call(struct lp_exec_mask *mask,
786 int func,
787 int *pc)
788 {
789 if (mask->function_stack_size >= LP_MAX_NUM_FUNCS) {
790 return;
791 }
792
793 lp_exec_mask_function_init(mask, mask->function_stack_size);
794 mask->function_stack[mask->function_stack_size].pc = *pc;
795 mask->function_stack[mask->function_stack_size].ret_mask = mask->ret_mask;
796 mask->function_stack_size++;
797 *pc = func;
798 }
799
800 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
801 {
802 LLVMBuilderRef builder = mask->bld->gallivm->builder;
803 struct function_ctx *ctx = func_ctx(mask);
804 LLVMValueRef exec_mask;
805
806 if (ctx->cond_stack_size == 0 &&
807 ctx->loop_stack_size == 0 &&
808 ctx->switch_stack_size == 0 &&
809 mask->function_stack_size == 1) {
810 /* returning from main() */
811 *pc = -1;
812 return;
813 }
814
815 if (mask->function_stack_size == 1) {
816 /*
817 * This requires special handling since we need to ensure
818 * we don't drop the mask even if we have no call stack
819 * (e.g. after a ret in a if clause after the endif)
820 */
821 mask->ret_in_main = TRUE;
822 }
823
824 exec_mask = LLVMBuildNot(builder,
825 mask->exec_mask,
826 "ret");
827
828 mask->ret_mask = LLVMBuildAnd(builder,
829 mask->ret_mask,
830 exec_mask, "ret_full");
831
832 lp_exec_mask_update(mask);
833 }
834
835 static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
836 {
837 }
838
839 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
840 {
841 struct function_ctx *ctx;
842
843 assert(mask->function_stack_size > 1);
844 assert(mask->function_stack_size <= LP_MAX_NUM_FUNCS);
845
846 ctx = func_ctx(mask);
847 mask->function_stack_size--;
848
849 *pc = ctx->pc;
850 mask->ret_mask = ctx->ret_mask;
851
852 lp_exec_mask_update(mask);
853 }
854
855
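/*
 * Return a pointer to the variable backing file[index] channel chan.
 * For register files accessed indirectly the backing storage is one flat
 * array instead, so the slot is addressed as index * 4 + chan.
 */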
856 static LLVMValueRef
857 get_file_ptr(struct lp_build_tgsi_soa_context *bld,
858 unsigned file,
859 unsigned index,
860 unsigned chan)
861 {
862 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
863 LLVMValueRef (*array_of_vars)[TGSI_NUM_CHANNELS];
864 LLVMValueRef var_of_array;
865
866 switch (file) {
867 case TGSI_FILE_TEMPORARY:
868 array_of_vars = bld->temps;
869 var_of_array = bld->temps_array;
870 break;
871 case TGSI_FILE_OUTPUT:
872 array_of_vars = bld->outputs;
873 var_of_array = bld->outputs_array;
874 break;
875 default:
876 assert(0);
877 return NULL;
878 }
879
880 assert(chan < 4);
881
882 if (bld->indirect_files & (1 << file)) {
883 LLVMValueRef lindex = lp_build_const_int32(bld->bld_base.base.gallivm, index * 4 + chan);
884 return LLVMBuildGEP(builder, var_of_array, &lindex, 1, "");
885 }
886 else {
887 assert(index <= bld->bld_base.info->file_max[file]);
888 return array_of_vars[index][chan];
889 }
890 }
891
892
893 /**
894 * Return pointer to a temporary register channel (src or dest).
895 * Note that indirect addressing cannot be handled here.
896 * \param index which temporary register
897 * \param chan which channel of the temp register.
898 */
899 LLVMValueRef
900 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context *bld,
901 unsigned index,
902 unsigned chan)
903 {
904 return get_file_ptr(bld, TGSI_FILE_TEMPORARY, index, chan);
905 }
906
907 /**
908  * Return pointer to an output register channel (src or dest).
909 * Note that indirect addressing cannot be handled here.
910 * \param index which output register
911 * \param chan which channel of the output register.
912 */
913 LLVMValueRef
914 lp_get_output_ptr(struct lp_build_tgsi_soa_context *bld,
915 unsigned index,
916 unsigned chan)
917 {
918 return get_file_ptr(bld, TGSI_FILE_OUTPUT, index, chan);
919 }
920
921 /*
922  * If we have indirect addressing in outputs, copy our alloca array
923  * to the output slots specified by the caller to make sure
924 * our outputs are delivered consistently via the same interface.
925 */
926 static void
927 gather_outputs(struct lp_build_tgsi_soa_context * bld)
928 {
929 if ((bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
930 unsigned index, chan;
931 assert(bld->bld_base.info->num_outputs <=
932 bld->bld_base.info->file_max[TGSI_FILE_OUTPUT] + 1);
933 for (index = 0; index < bld->bld_base.info->num_outputs; ++index) {
934 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
935 bld->outputs[index][chan] = lp_get_output_ptr(bld, index, chan);
936 }
937 }
938 }
939 }
940
941 /**
942 * Gather vector.
943 * XXX the lp_build_gather() function should be capable of doing this
944 * with a little work.
945 */
946 static LLVMValueRef
947 build_gather(struct lp_build_tgsi_context *bld_base,
948 LLVMValueRef base_ptr,
949 LLVMValueRef indexes,
950 LLVMValueRef overflow_mask,
951 LLVMValueRef indexes2)
952 {
953 struct gallivm_state *gallivm = bld_base->base.gallivm;
954 LLVMBuilderRef builder = gallivm->builder;
955 struct lp_build_context *uint_bld = &bld_base->uint_bld;
956 struct lp_build_context *bld = &bld_base->base;
957 LLVMValueRef res;
958 unsigned i;
959
960 if (indexes2)
961 res = LLVMGetUndef(LLVMVectorType(LLVMFloatTypeInContext(gallivm->context), bld_base->base.type.length * 2));
962 else
963 res = bld->undef;
964 /*
965 * overflow_mask is a vector telling us which channels
966 * in the vector overflowed. We use the overflow behavior for
967 * constant buffers which is defined as:
968 * Out of bounds access to constant buffer returns 0 in all
969 * components. Out of bounds behavior is always with respect
970 * to the size of the buffer bound at that slot.
971 */
972
973 if (overflow_mask) {
974 /*
975 * We avoid per-element control flow here (also due to llvm going crazy,
976 * though I suspect it's better anyway since overflow is likely rare).
977 * Note that since we still fetch from buffers even if num_elements was
978 * zero (in this case we'll fetch from index zero) the jit func callers
979 * MUST provide valid fake constant buffers of size 4x32 (the values do
980 * not matter), otherwise we'd still need (not per element though)
981 * control flow.
982 */
983 indexes = lp_build_select(uint_bld, overflow_mask, uint_bld->zero, indexes);
984 if (indexes2)
985 indexes2 = lp_build_select(uint_bld, overflow_mask, uint_bld->zero, indexes2);
986 }
987
988 /*
989 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
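 * When indexes2 is non-NULL (the 64-bit fetch path), even result elements
 * take their index from 'indexes' and odd ones from 'indexes2', so the two
 * 32-bit halves of each double end up adjacent.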
990 */
991 for (i = 0; i < bld->type.length * (indexes2 ? 2 : 1); i++) {
992 LLVMValueRef si, di;
993 LLVMValueRef index;
994 LLVMValueRef scalar_ptr, scalar;
995
996 di = lp_build_const_int32(bld->gallivm, i);
997 if (indexes2)
998 si = lp_build_const_int32(bld->gallivm, i >> 1);
999 else
1000 si = di;
1001
1002 if (indexes2 && (i & 1)) {
1003 index = LLVMBuildExtractElement(builder,
1004 indexes2, si, "");
1005 } else {
1006 index = LLVMBuildExtractElement(builder,
1007 indexes, si, "");
1008 }
1009 scalar_ptr = LLVMBuildGEP(builder, base_ptr,
1010 &index, 1, "gather_ptr");
1011 scalar = LLVMBuildLoad(builder, scalar_ptr, "");
1012
1013 res = LLVMBuildInsertElement(builder, res, scalar, di, "");
1014 }
1015
1016 if (overflow_mask) {
1017 if (indexes2) {
1018 res = LLVMBuildBitCast(builder, res, bld_base->dbl_bld.vec_type, "");
1019 overflow_mask = LLVMBuildSExt(builder, overflow_mask,
1020 bld_base->dbl_bld.int_vec_type, "");
1021 res = lp_build_select(&bld_base->dbl_bld, overflow_mask,
1022 bld_base->dbl_bld.zero, res);
1023 } else
1024 res = lp_build_select(bld, overflow_mask, bld->zero, res);
1025 }
1026
1027 return res;
1028 }
1029
1030
1031 /**
1032 * Scatter/store vector.
1033 */
1034 static void
1035 emit_mask_scatter(struct lp_build_tgsi_soa_context *bld,
1036 LLVMValueRef base_ptr,
1037 LLVMValueRef indexes,
1038 LLVMValueRef values,
1039 struct lp_exec_mask *mask,
1040 LLVMValueRef pred)
1041 {
1042 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1043 LLVMBuilderRef builder = gallivm->builder;
1044 unsigned i;
1045
1046 /* Mix the predicate and execution mask */
1047 if (mask->has_mask) {
1048 if (pred) {
1049 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
1050 }
1051 else {
1052 pred = mask->exec_mask;
1053 }
1054 }
1055
1056 /*
1057 * Loop over elements of index_vec, store scalar value.
1058 */
1059 for (i = 0; i < bld->bld_base.base.type.length; i++) {
1060 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1061 LLVMValueRef index = LLVMBuildExtractElement(builder, indexes, ii, "");
1062 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr, &index, 1, "scatter_ptr");
1063 LLVMValueRef val = LLVMBuildExtractElement(builder, values, ii, "scatter_val");
1064 LLVMValueRef scalar_pred = pred ?
1065 LLVMBuildExtractElement(builder, pred, ii, "scatter_pred") : NULL;
1066
1067 if (0)
1068 lp_build_printf(gallivm, "scatter %d: val %f at %d %p\n",
1069 ii, val, index, scalar_ptr);
1070
1071 if (scalar_pred) {
1072 LLVMValueRef real_val, dst_val;
1073 dst_val = LLVMBuildLoad(builder, scalar_ptr, "");
1074 real_val = lp_build_select(&bld->elem_bld, scalar_pred, val, dst_val);
1075 LLVMBuildStore(builder, real_val, scalar_ptr);
1076 }
1077 else {
1078 LLVMBuildStore(builder, val, scalar_ptr);
1079 }
1080 }
1081 }
1082
1083
1084 /**
1085 * Read the current value of the ADDR register, convert the floats to
1086 * ints, add the base index and return the vector of offsets.
1087 * The offsets will be used to index into the constant buffer or
1088 * temporary register file.
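 * E.g. for a source like CONST[3 + ADDR[0].x], reg_index is 3 and the
 * result is the vector 3 + ADDR[0].x, computed per channel (and clamped
 * to the declared file size for non-constant files).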
1089 */
1090 static LLVMValueRef
1091 get_indirect_index(struct lp_build_tgsi_soa_context *bld,
1092 unsigned reg_file, unsigned reg_index,
1093 const struct tgsi_ind_register *indirect_reg)
1094 {
1095 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1096 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
1097 /* use the swizzled component (normally X) of the address register */
1098 unsigned swizzle = indirect_reg->Swizzle;
1099 LLVMValueRef base;
1100 LLVMValueRef rel;
1101 LLVMValueRef max_index;
1102 LLVMValueRef index;
1103
1104 assert(bld->indirect_files & (1 << reg_file));
1105
1106 base = lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, reg_index);
1107
1108 assert(swizzle < 4);
1109 switch (indirect_reg->File) {
1110 case TGSI_FILE_ADDRESS:
1111 rel = LLVMBuildLoad(builder,
1112 bld->addr[indirect_reg->Index][swizzle],
1113 "load addr reg");
1114 /* ADDR LLVM values already have LLVM integer type. */
1115 break;
1116 case TGSI_FILE_TEMPORARY:
1117 rel = lp_get_temp_ptr_soa(bld, indirect_reg->Index, swizzle);
1118 rel = LLVMBuildLoad(builder, rel, "load temp reg");
1119 /* TEMP LLVM values always have LLVM float type, but for indirection, the
1120 * value actually stored is expected to be an integer */
1121 rel = LLVMBuildBitCast(builder, rel, uint_bld->vec_type, "");
1122 break;
1123 default:
1124 assert(0);
1125 rel = uint_bld->zero;
1126 }
1127
1128 index = lp_build_add(uint_bld, base, rel);
1129
1130 /*
1131 * emit_fetch_constant handles constant buffer overflow so this code
1132 * is pointless for them.
1133 * Furthermore the D3D10 spec in section 6.5 says:
1134 * If the constant buffer bound to a slot is larger than the size
1135 * declared in the shader for that slot, implementations are allowed
1136 * to return incorrect data (not necessarily 0) for indices that are
1137 * larger than the declared size but smaller than the buffer size.
1138 */
1139 if (reg_file != TGSI_FILE_CONSTANT) {
1140 max_index = lp_build_const_int_vec(bld->bld_base.base.gallivm,
1141 uint_bld->type,
1142 bld->bld_base.info->file_max[reg_file]);
1143
1144 assert(!uint_bld->type.sign);
1145 index = lp_build_min(uint_bld, index, max_index);
1146 }
1147
1148 return index;
1149 }
1150
1151 static struct lp_build_context *
1152 stype_to_fetch(struct lp_build_tgsi_context * bld_base,
1153 enum tgsi_opcode_type stype)
1154 {
1155 struct lp_build_context *bld_fetch;
1156
1157 switch (stype) {
1158 case TGSI_TYPE_FLOAT:
1159 case TGSI_TYPE_UNTYPED:
1160 bld_fetch = &bld_base->base;
1161 break;
1162 case TGSI_TYPE_UNSIGNED:
1163 bld_fetch = &bld_base->uint_bld;
1164 break;
1165 case TGSI_TYPE_SIGNED:
1166 bld_fetch = &bld_base->int_bld;
1167 break;
1168 case TGSI_TYPE_DOUBLE:
1169 bld_fetch = &bld_base->dbl_bld;
1170 break;
1171 case TGSI_TYPE_VOID:
1172 default:
1173 assert(0);
1174 bld_fetch = NULL;
1175 break;
1176 }
1177 return bld_fetch;
1178 }
1179
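/*
 * Compute per-channel offsets into an SoA register array, which is laid
 * out as one vector of 'length' floats per register channel:
 *
 *    offset = (indirect_index * 4 + chan_index) * length [+ lane]
 *
 * The per-lane term is only added when each lane of the vector may index
 * a different register (need_perelement_offset).
 */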
1180 static LLVMValueRef
1181 get_soa_array_offsets(struct lp_build_context *uint_bld,
1182 LLVMValueRef indirect_index,
1183 unsigned chan_index,
1184 boolean need_perelement_offset)
1185 {
1186 struct gallivm_state *gallivm = uint_bld->gallivm;
1187 LLVMValueRef chan_vec =
1188 lp_build_const_int_vec(uint_bld->gallivm, uint_bld->type, chan_index);
1189 LLVMValueRef length_vec =
1190 lp_build_const_int_vec(gallivm, uint_bld->type, uint_bld->type.length);
1191 LLVMValueRef index_vec;
1192
1193 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1194 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1195 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1196 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1197
1198 if (need_perelement_offset) {
1199 LLVMValueRef pixel_offsets;
1200 unsigned i;
1201 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1202 pixel_offsets = uint_bld->undef;
1203 for (i = 0; i < uint_bld->type.length; i++) {
1204 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1205 pixel_offsets = LLVMBuildInsertElement(gallivm->builder, pixel_offsets,
1206 ii, ii, "");
1207 }
1208 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1209 }
1210 return index_vec;
1211 }
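/*
 * Fetch one channel of a constant register. Constants live in a flat
 * float array per buffer, addressed as index * 4 + swizzle; the direct
 * case is a scalar load broadcast to all channels, while the indirect
 * case gathers per channel and yields zero for out-of-bounds indices.
 */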
1212
1213 static LLVMValueRef
1214 emit_fetch_constant(
1215 struct lp_build_tgsi_context * bld_base,
1216 const struct tgsi_full_src_register * reg,
1217 enum tgsi_opcode_type stype,
1218 unsigned swizzle)
1219 {
1220 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1221 struct gallivm_state *gallivm = bld_base->base.gallivm;
1222 LLVMBuilderRef builder = gallivm->builder;
1223 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1224 unsigned dimension = 0;
1225 LLVMValueRef consts_ptr;
1226 LLVMValueRef num_consts;
1227 LLVMValueRef res;
1228
1229 /* XXX: Handle fetching xyzw components as a vector */
1230 assert(swizzle != ~0);
1231
1232 if (reg->Register.Dimension) {
1233 assert(!reg->Dimension.Indirect);
1234 dimension = reg->Dimension.Index;
1235 assert(dimension < LP_MAX_TGSI_CONST_BUFFERS);
1236 }
1237
1238 consts_ptr = bld->consts[dimension];
1239 num_consts = bld->consts_sizes[dimension];
1240
1241 if (reg->Register.Indirect) {
1242 LLVMValueRef indirect_index;
1243 LLVMValueRef swizzle_vec =
1244 lp_build_const_int_vec(gallivm, uint_bld->type, swizzle);
1245 LLVMValueRef index_vec; /* index into the const buffer */
1246 LLVMValueRef overflow_mask;
1247 LLVMValueRef index_vec2 = NULL;
1248
1249 indirect_index = get_indirect_index(bld,
1250 reg->Register.File,
1251 reg->Register.Index,
1252 &reg->Indirect);
1253
1254 /* All fetches are from the same constant buffer, so
1255 * we need to propagate the size to a vector to do a
1256 * vector comparison */
1257 num_consts = lp_build_broadcast_scalar(uint_bld, num_consts);
1258 /* Construct a boolean vector telling us which channels
1259 * overflow the bound constant buffer */
1260 overflow_mask = lp_build_compare(gallivm, uint_bld->type, PIPE_FUNC_GEQUAL,
1261 indirect_index, num_consts);
1262
1263 /* index_vec = indirect_index * 4 + swizzle */
1264 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1265 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
1266
1267 if (stype == TGSI_TYPE_DOUBLE) {
1268 LLVMValueRef swizzle_vec2;
1269 swizzle_vec2 = lp_build_const_int_vec(gallivm, uint_bld->type, swizzle + 1);
1270 index_vec2 = lp_build_shl_imm(uint_bld, indirect_index, 2);
1271 index_vec2 = lp_build_add(uint_bld, index_vec2, swizzle_vec2);
1272 }
1273 /* Gather values from the constant buffer */
1274 res = build_gather(bld_base, consts_ptr, index_vec, overflow_mask, index_vec2);
1275 }
1276 else {
1277 LLVMValueRef index; /* index into the const buffer */
1278 LLVMValueRef scalar, scalar_ptr;
1279 struct lp_build_context *bld_broad = &bld_base->base;
1280 index = lp_build_const_int32(gallivm, reg->Register.Index * 4 + swizzle);
1281
1282 scalar_ptr = LLVMBuildGEP(builder, consts_ptr,
1283 &index, 1, "");
1284 if (stype == TGSI_TYPE_DOUBLE) {
1285 LLVMTypeRef dptr_type = LLVMPointerType(LLVMDoubleTypeInContext(gallivm->context), 0);
1286 scalar_ptr = LLVMBuildBitCast(builder, scalar_ptr, dptr_type, "");
1287 bld_broad = &bld_base->dbl_bld;
1288 }
1289 scalar = LLVMBuildLoad(builder, scalar_ptr, "");
1290 res = lp_build_broadcast_scalar(bld_broad, scalar);
1291 }
1292
1293 if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
1294 struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
1295 res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
1296 }
1297
1298 return res;
1299 }
1300
1301 /**
1302 * Fetch double values from two separate channels.
1303 * Doubles are stored split across two channels, like xy and zw.
1304  * This function creates a float vector of twice the execution width
1305  * (e.g. 16 floats for 8-wide execution), extracts the values from the two
1306  * channels, puts them in the correct place, then casts to doubles (e.g. 8).
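 * For a 4-wide example the shuffle mask is {0,4,1,5,2,6,3,7}, i.e.
 * input[0], input2[0], input[1], input2[1], ... so the two 32-bit halves
 * of each double become adjacent before the bitcast.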
1307 */
1308 static LLVMValueRef
1309 emit_fetch_double(
1310 struct lp_build_tgsi_context * bld_base,
1311 enum tgsi_opcode_type stype,
1312 LLVMValueRef input,
1313 LLVMValueRef input2)
1314 {
1315 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1316 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1317 LLVMBuilderRef builder = gallivm->builder;
1318 LLVMValueRef res;
1319 struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
1320 int i;
1321 LLVMValueRef shuffles[16];
1322 int len = bld_base->base.type.length * 2;
1323 assert(len <= 16);
1324
1325 for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
1326 shuffles[i] = lp_build_const_int32(gallivm, i / 2);
1327 shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
1328 }
1329 res = LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
1330
1331 return LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
1332 }
1333
1334 static LLVMValueRef
1335 emit_fetch_immediate(
1336 struct lp_build_tgsi_context * bld_base,
1337 const struct tgsi_full_src_register * reg,
1338 enum tgsi_opcode_type stype,
1339 unsigned swizzle)
1340 {
1341 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1342 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1343 LLVMBuilderRef builder = gallivm->builder;
1344 LLVMValueRef res = NULL;
1345
1346 if (bld->use_immediates_array || reg->Register.Indirect) {
1347 LLVMValueRef imms_array;
1348 LLVMTypeRef fptr_type;
1349
1350 /* cast imms_array pointer to float* */
1351 fptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1352 imms_array = LLVMBuildBitCast(builder, bld->imms_array, fptr_type, "");
1353
1354 if (reg->Register.Indirect) {
1355 LLVMValueRef indirect_index;
1356 LLVMValueRef index_vec; /* index into the immediate register array */
1357 LLVMValueRef index_vec2 = NULL;
1358 indirect_index = get_indirect_index(bld,
1359 reg->Register.File,
1360 reg->Register.Index,
1361 &reg->Indirect);
1362 /*
1363 * Unlike for other reg classes, adding pixel offsets is unnecessary -
1364 * immediates are stored as full vectors (FIXME??? - might be better
1365 * to store them the same as constants) but all elements are the same
1366 * in any case.
1367 */
1368 index_vec = get_soa_array_offsets(&bld_base->uint_bld,
1369 indirect_index,
1370 swizzle,
1371 FALSE);
1372 if (stype == TGSI_TYPE_DOUBLE)
1373 index_vec2 = get_soa_array_offsets(&bld_base->uint_bld,
1374 indirect_index,
1375 swizzle + 1,
1376 FALSE);
1377 /* Gather values from the immediate register array */
1378 res = build_gather(bld_base, imms_array, index_vec, NULL, index_vec2);
1379 } else {
1380 LLVMValueRef lindex = lp_build_const_int32(gallivm,
1381 reg->Register.Index * 4 + swizzle);
1382 LLVMValueRef imms_ptr = LLVMBuildGEP(builder,
1383 bld->imms_array, &lindex, 1, "");
1384 res = LLVMBuildLoad(builder, imms_ptr, "");
1385
1386 if (stype == TGSI_TYPE_DOUBLE) {
1387 LLVMValueRef lindex1;
1388 LLVMValueRef imms_ptr2;
1389 LLVMValueRef res2;
1390
1391 lindex1 = lp_build_const_int32(gallivm,
1392 reg->Register.Index * 4 + swizzle + 1);
1393 imms_ptr2 = LLVMBuildGEP(builder,
1394 bld->imms_array, &lindex1, 1, "");
1395 res2 = LLVMBuildLoad(builder, imms_ptr2, "");
1396 res = emit_fetch_double(bld_base, stype, res, res2);
1397 }
1398 }
1399 }
1400 else {
1401 res = bld->immediates[reg->Register.Index][swizzle];
1402 if (stype == TGSI_TYPE_DOUBLE)
1403 res = emit_fetch_double(bld_base, stype, res, bld->immediates[reg->Register.Index][swizzle + 1]);
1404 }
1405
1406 if (stype == TGSI_TYPE_UNSIGNED) {
1407 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
1408 } else if (stype == TGSI_TYPE_SIGNED) {
1409 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
1410 } else if (stype == TGSI_TYPE_DOUBLE) {
1411 res = LLVMBuildBitCast(builder, res, bld_base->dbl_bld.vec_type, "");
1412 }
1413 return res;
1414 }
1415
1416 static LLVMValueRef
1417 emit_fetch_input(
1418 struct lp_build_tgsi_context * bld_base,
1419 const struct tgsi_full_src_register * reg,
1420 enum tgsi_opcode_type stype,
1421 unsigned swizzle)
1422 {
1423 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1424 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1425 LLVMBuilderRef builder = gallivm->builder;
1426 LLVMValueRef res;
1427
1428 if (reg->Register.Indirect) {
1429 LLVMValueRef indirect_index;
1430 LLVMValueRef index_vec; /* index into the input reg array */
1431 LLVMValueRef index_vec2 = NULL;
1432 LLVMValueRef inputs_array;
1433 LLVMTypeRef fptr_type;
1434
1435 indirect_index = get_indirect_index(bld,
1436 reg->Register.File,
1437 reg->Register.Index,
1438 &reg->Indirect);
1439
1440 index_vec = get_soa_array_offsets(&bld_base->uint_bld,
1441 indirect_index,
1442 swizzle,
1443 TRUE);
1444 if (stype == TGSI_TYPE_DOUBLE) {
1445 index_vec2 = get_soa_array_offsets(&bld_base->uint_bld,
1446 indirect_index,
1447 swizzle + 1,
1448 TRUE);
1449 }
1450 /* cast inputs_array pointer to float* */
1451 fptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1452 inputs_array = LLVMBuildBitCast(builder, bld->inputs_array, fptr_type, "");
1453
1454 /* Gather values from the input register array */
1455 res = build_gather(bld_base, inputs_array, index_vec, NULL, index_vec2);
1456 } else {
1457 if (bld->indirect_files & (1 << TGSI_FILE_INPUT)) {
1458 LLVMValueRef lindex = lp_build_const_int32(gallivm,
1459 reg->Register.Index * 4 + swizzle);
1460 LLVMValueRef input_ptr = LLVMBuildGEP(builder,
1461 bld->inputs_array, &lindex, 1, "");
1462
1463 res = LLVMBuildLoad(builder, input_ptr, "");
1464 if (stype == TGSI_TYPE_DOUBLE) {
1465 LLVMValueRef lindex1;
1466 LLVMValueRef input_ptr2;
1467 LLVMValueRef res2;
1468
1469 lindex1 = lp_build_const_int32(gallivm,
1470 reg->Register.Index * 4 + swizzle + 1);
1471 input_ptr2 = LLVMBuildGEP(builder,
1472 bld->inputs_array, &lindex1, 1, "");
1473 res2 = LLVMBuildLoad(builder, input_ptr2, "");
1474 res = emit_fetch_double(bld_base, stype, res, res2);
1475 }
1476 }
1477 else {
1478 res = bld->inputs[reg->Register.Index][swizzle];
1479 if (stype == TGSI_TYPE_DOUBLE)
1480 res = emit_fetch_double(bld_base, stype, res, bld->inputs[reg->Register.Index][swizzle + 1]);
1481 }
1482 }
1483
1484 assert(res);
1485
1486 if (stype == TGSI_TYPE_UNSIGNED) {
1487 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
1488 } else if (stype == TGSI_TYPE_SIGNED) {
1489 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
1490 } else if (stype == TGSI_TYPE_DOUBLE) {
1491 res = LLVMBuildBitCast(builder, res, bld_base->dbl_bld.vec_type, "");
1492 }
1493
1494 return res;
1495 }
1496
1497
1498 static LLVMValueRef
1499 emit_fetch_gs_input(
1500 struct lp_build_tgsi_context * bld_base,
1501 const struct tgsi_full_src_register * reg,
1502 enum tgsi_opcode_type stype,
1503 unsigned swizzle)
1504 {
1505 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1506 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1507 const struct tgsi_shader_info *info = bld->bld_base.info;
1508 LLVMBuilderRef builder = gallivm->builder;
1509 LLVMValueRef attrib_index = NULL;
1510 LLVMValueRef vertex_index = NULL;
1511 LLVMValueRef swizzle_index = lp_build_const_int32(gallivm, swizzle);
1512 LLVMValueRef res;
1513
1514 if (info->input_semantic_name[reg->Register.Index] == TGSI_SEMANTIC_PRIMID) {
1515 /* This is really a system value, not a regular input */
1516 assert(!reg->Register.Indirect);
1517 assert(!reg->Dimension.Indirect);
1518 res = bld->system_values.prim_id;
1519 if (stype != TGSI_TYPE_UNSIGNED && stype != TGSI_TYPE_SIGNED) {
1520 res = LLVMBuildBitCast(builder, res, bld_base->base.vec_type, "");
1521 }
1522 return res;
1523 }
1524
1525 if (reg->Register.Indirect) {
1526 attrib_index = get_indirect_index(bld,
1527 reg->Register.File,
1528 reg->Register.Index,
1529 &reg->Indirect);
1530 } else {
1531 attrib_index = lp_build_const_int32(gallivm, reg->Register.Index);
1532 }
1533
1534 if (reg->Dimension.Indirect) {
1535 vertex_index = get_indirect_index(bld,
1536 reg->Register.File,
1537 reg->Dimension.Index,
1538 &reg->DimIndirect);
1539 } else {
1540 vertex_index = lp_build_const_int32(gallivm, reg->Dimension.Index);
1541 }
1542
1543 res = bld->gs_iface->fetch_input(bld->gs_iface, bld_base,
1544 reg->Dimension.Indirect,
1545 vertex_index,
1546 reg->Register.Indirect,
1547 attrib_index,
1548 swizzle_index);
1549
1550 assert(res);
1551 if (stype == TGSI_TYPE_DOUBLE) {
1552 LLVMValueRef swizzle_index = lp_build_const_int32(gallivm, swizzle + 1);
1553 LLVMValueRef res2;
1554 res2 = bld->gs_iface->fetch_input(bld->gs_iface, bld_base,
1555 reg->Dimension.Indirect,
1556 vertex_index,
1557 reg->Register.Indirect,
1558 attrib_index,
1559 swizzle_index);
1560 assert(res2);
1561 res = emit_fetch_double(bld_base, stype, res, res2);
1562 } else if (stype == TGSI_TYPE_UNSIGNED) {
1563 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
1564 } else if (stype == TGSI_TYPE_SIGNED) {
1565 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
1566 }
1567
1568 return res;
1569 }
1570
1571 static LLVMValueRef
1572 emit_fetch_temporary(
1573 struct lp_build_tgsi_context * bld_base,
1574 const struct tgsi_full_src_register * reg,
1575 enum tgsi_opcode_type stype,
1576 unsigned swizzle)
1577 {
1578 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1579 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1580 LLVMBuilderRef builder = gallivm->builder;
1581 LLVMValueRef res;
1582
1583 if (reg->Register.Indirect) {
1584 LLVMValueRef indirect_index;
1585 LLVMValueRef index_vec, index_vec2 = NULL; /* index into the temp reg array */
1586 LLVMValueRef temps_array;
1587 LLVMTypeRef fptr_type;
1588
1589 indirect_index = get_indirect_index(bld,
1590 reg->Register.File,
1591 reg->Register.Index,
1592 &reg->Indirect);
1593
1594 index_vec = get_soa_array_offsets(&bld_base->uint_bld,
1595 indirect_index,
1596 swizzle,
1597 TRUE);
1598 if (stype == TGSI_TYPE_DOUBLE) {
1599 index_vec2 = get_soa_array_offsets(&bld_base->uint_bld,
1600 indirect_index,
1601 swizzle + 1,
1602 TRUE);
1603 }
1604
1605 /* cast temps_array pointer to float* */
1606 fptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1607 temps_array = LLVMBuildBitCast(builder, bld->temps_array, fptr_type, "");
1608
1609 /* Gather values from the temporary register array */
1610 res = build_gather(bld_base, temps_array, index_vec, NULL, index_vec2);
1611 }
1612 else {
1613 LLVMValueRef temp_ptr;
1614 temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle);
1615 res = LLVMBuildLoad(builder, temp_ptr, "");
1616
1617 if (stype == TGSI_TYPE_DOUBLE) {
1618 LLVMValueRef temp_ptr2, res2;
1619
1620 temp_ptr2 = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle + 1);
1621 res2 = LLVMBuildLoad(builder, temp_ptr2, "");
1622 res = emit_fetch_double(bld_base, stype, res, res2);
1623 }
1624 }
1625
1626 if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED || stype == TGSI_TYPE_DOUBLE) {
1627 struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
1628 res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
1629 }
1630
1631 return res;
1632 }
1633
1634 static LLVMValueRef
1635 emit_fetch_system_value(
1636 struct lp_build_tgsi_context * bld_base,
1637 const struct tgsi_full_src_register * reg,
1638 enum tgsi_opcode_type stype,
1639 unsigned swizzle)
1640 {
1641 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1642 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1643 const struct tgsi_shader_info *info = bld->bld_base.info;
1644 LLVMBuilderRef builder = gallivm->builder;
1645 LLVMValueRef res;
1646 enum tgsi_opcode_type atype; /* Actual type of the value */
1647
1648 assert(!reg->Register.Indirect);
1649
1650 switch (info->system_value_semantic_name[reg->Register.Index]) {
1651 case TGSI_SEMANTIC_INSTANCEID:
1652 res = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.instance_id);
1653 atype = TGSI_TYPE_UNSIGNED;
1654 break;
1655
1656 case TGSI_SEMANTIC_VERTEXID:
1657 res = bld->system_values.vertex_id;
1658 atype = TGSI_TYPE_UNSIGNED;
1659 break;
1660
1661 case TGSI_SEMANTIC_VERTEXID_NOBASE:
1662 res = bld->system_values.vertex_id_nobase;
1663 atype = TGSI_TYPE_UNSIGNED;
1664 break;
1665
1666 case TGSI_SEMANTIC_BASEVERTEX:
1667 res = bld->system_values.basevertex;
1668 atype = TGSI_TYPE_UNSIGNED;
1669 break;
1670
1671 case TGSI_SEMANTIC_PRIMID:
1672 res = bld->system_values.prim_id;
1673 atype = TGSI_TYPE_UNSIGNED;
1674 break;
1675
1676 case TGSI_SEMANTIC_INVOCATIONID:
1677 res = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.invocation_id);
1678 atype = TGSI_TYPE_UNSIGNED;
1679 break;
1680
1681 default:
1682 assert(!"unexpected semantic in emit_fetch_system_value");
1683 res = bld_base->base.zero;
1684 atype = TGSI_TYPE_FLOAT;
1685 break;
1686 }
1687
1688 if (atype != stype) {
1689 if (stype == TGSI_TYPE_FLOAT) {
1690 res = LLVMBuildBitCast(builder, res, bld_base->base.vec_type, "");
1691 } else if (stype == TGSI_TYPE_UNSIGNED) {
1692 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
1693 } else if (stype == TGSI_TYPE_SIGNED) {
1694 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
1695 }
1696 }
1697
1698 return res;
1699 }
1700
1701 /**
1702 * Register fetch with derivatives.
1703 */
1704 static void
1705 emit_fetch_deriv(
1706 struct lp_build_tgsi_soa_context *bld,
1707 LLVMValueRef src,
1708 LLVMValueRef *res,
1709 LLVMValueRef *ddx,
1710 LLVMValueRef *ddy)
1711 {
1712 if (res)
1713 *res = src;
1714
1715 /* TODO: use interpolation coeffs for inputs */
1716
1717 if (ddx)
1718 *ddx = lp_build_ddx(&bld->bld_base.base, src);
1719
1720 if (ddy)
1721 *ddy = lp_build_ddy(&bld->bld_base.base, src);
1722 }
1723
1724
1725 /**
1726  * Fetch the instruction's predicate register, converting each swizzled
1726  * channel to an integer mask.
1727 */
1728 static void
1729 emit_fetch_predicate(
1730 struct lp_build_tgsi_soa_context *bld,
1731 const struct tgsi_full_instruction *inst,
1732 LLVMValueRef *pred)
1733 {
1734 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1735 unsigned index;
1736 unsigned char swizzles[4];
1737 LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
1738 LLVMValueRef value;
1739 unsigned chan;
1740
1741 if (!inst->Instruction.Predicate) {
1742 TGSI_FOR_EACH_CHANNEL( chan ) {
1743 pred[chan] = NULL;
1744 }
1745 return;
1746 }
1747
1748 swizzles[0] = inst->Predicate.SwizzleX;
1749 swizzles[1] = inst->Predicate.SwizzleY;
1750 swizzles[2] = inst->Predicate.SwizzleZ;
1751 swizzles[3] = inst->Predicate.SwizzleW;
1752
1753 index = inst->Predicate.Index;
1754 assert(index < LP_MAX_TGSI_PREDS);
1755
1756 TGSI_FOR_EACH_CHANNEL( chan ) {
1757 unsigned swizzle = swizzles[chan];
1758
1759 /*
1760 * Only fetch the predicate register channels that are actually listed
1761 * in the swizzles
1762 */
1763 if (!unswizzled[swizzle]) {
1764 value = LLVMBuildLoad(builder,
1765 bld->preds[index][swizzle], "");
1766
1767 /*
1768 * Convert the value to an integer mask.
1769 *
1770          * TODO: Short-circuit this comparison -- a D3D setp_xx instruction
1771          * needlessly causes two comparisons due to storing the intermediate
1772          * result as a float vector instead of an integer mask vector.
1773 */
1774 value = lp_build_compare(bld->bld_base.base.gallivm,
1775 bld->bld_base.base.type,
1776 PIPE_FUNC_NOTEQUAL,
1777 value,
1778 bld->bld_base.base.zero);
1779 if (inst->Predicate.Negate) {
1780 value = LLVMBuildNot(builder, value, "");
1781 }
1782
1783 unswizzled[swizzle] = value;
1784 } else {
1785 value = unswizzled[swizzle];
1786 }
1787
1788 pred[chan] = value;
1789 }
1790 }
1791
1792 /**
1793  * Store an array of 8 doubles into two arrays of 8 floats.
1794  * I.e.
1795  * value is d0, d1, d2, d3 etc.
1796  * Each double has high and low pieces x, y,
1797  * so it gets stored into the separate channels as:
1798 * chan_ptr = d0.x, d1.x, d2.x, d3.x
1799 * chan_ptr2 = d0.y, d1.y, d2.y, d3.y
1800 */
1801 static void
1802 emit_store_double_chan(struct lp_build_tgsi_context *bld_base,
1803 int dtype,
1804 LLVMValueRef chan_ptr, LLVMValueRef chan_ptr2,
1805 LLVMValueRef pred,
1806 LLVMValueRef value)
1807 {
1808 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1809 struct gallivm_state *gallivm = bld_base->base.gallivm;
1810 LLVMBuilderRef builder = gallivm->builder;
1811 struct lp_build_context *float_bld = &bld_base->base;
1812 unsigned i;
1813 LLVMValueRef temp, temp2;
1814 LLVMValueRef shuffles[8];
1815 LLVMValueRef shuffles2[8];
1816
1817 for (i = 0; i < bld_base->base.type.length; i++) {
1818 shuffles[i] = lp_build_const_int32(gallivm, i * 2);
1819 shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
1820 }
1821
1822 temp = LLVMBuildShuffleVector(builder, value,
1823 LLVMGetUndef(LLVMTypeOf(value)),
1824 LLVMConstVector(shuffles,
1825 bld_base->base.type.length),
1826 "");
1827 temp2 = LLVMBuildShuffleVector(builder, value,
1828 LLVMGetUndef(LLVMTypeOf(value)),
1829 LLVMConstVector(shuffles2,
1830 bld_base->base.type.length),
1831 "");
1832
1833 lp_exec_mask_store(&bld->exec_mask, float_bld, pred, temp, chan_ptr);
1834 lp_exec_mask_store(&bld->exec_mask, float_bld, pred, temp2, chan_ptr2);
1835 }
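
/*
 * Worked example for the shuffles above, assuming a 4-wide float type
 * (so value carries 4 doubles as 8 floats):
 *
 *    shuffles  = { 0, 2, 4, 6 }  ->  temp  = d0.x, d1.x, d2.x, d3.x
 *    shuffles2 = { 1, 3, 5, 7 }  ->  temp2 = d0.y, d1.y, d2.y, d3.y
 *
 * i.e. the even lanes of value hold the x pieces and the odd lanes the
 * y pieces of each double.
 */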
1836
1837 /**
1838 * Register store.
1839 */
1840 static void
1841 emit_store_chan(
1842 struct lp_build_tgsi_context *bld_base,
1843 const struct tgsi_full_instruction *inst,
1844 unsigned index,
1845 unsigned chan_index,
1846 LLVMValueRef pred,
1847 LLVMValueRef value)
1848 {
1849 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1850 struct gallivm_state *gallivm = bld_base->base.gallivm;
1851 LLVMBuilderRef builder = gallivm->builder;
1852 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
1853 struct lp_build_context *float_bld = &bld_base->base;
1854 struct lp_build_context *int_bld = &bld_base->int_bld;
1855 LLVMValueRef indirect_index = NULL;
1856 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
1857
1858 /*
1859 * Apply saturation.
1860 *
1861     * The value is always assumed to be float.
1862 */
1863 if (inst->Instruction.Saturate) {
1864 assert(dtype == TGSI_TYPE_FLOAT ||
1865 dtype == TGSI_TYPE_UNTYPED);
1866 value = LLVMBuildBitCast(builder, value, float_bld->vec_type, "");
1867 value = lp_build_clamp_zero_one_nanzero(float_bld, value);
1868 }
1869
1870 if (reg->Register.Indirect) {
1871 /*
1872        * Currently mesa/st doesn't generate indirect stores
1873        * to doubles; it normally uses MOV to do indirect stores.
1874 */
1875 assert(dtype != TGSI_TYPE_DOUBLE);
1876 indirect_index = get_indirect_index(bld,
1877 reg->Register.File,
1878 reg->Register.Index,
1879 &reg->Indirect);
1880 } else {
1881 assert(reg->Register.Index <=
1882 bld_base->info->file_max[reg->Register.File]);
1883 }
1884
1885 if (DEBUG_EXECUTION) {
1886 emit_dump_reg(gallivm, reg->Register.File, reg->Register.Index, chan_index, value);
1887 }
1888
1889 switch( reg->Register.File ) {
1890 case TGSI_FILE_OUTPUT:
1891 /* Outputs are always stored as floats */
1892 value = LLVMBuildBitCast(builder, value, float_bld->vec_type, "");
1893
1894 if (reg->Register.Indirect) {
1895 LLVMValueRef index_vec; /* indexes into the output registers */
1896 LLVMValueRef outputs_array;
1897 LLVMTypeRef fptr_type;
1898
1899 index_vec = get_soa_array_offsets(&bld_base->uint_bld,
1900 indirect_index,
1901 chan_index,
1902 TRUE);
1903
1904 fptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1905 outputs_array = LLVMBuildBitCast(builder, bld->outputs_array, fptr_type, "");
1906
1907 /* Scatter store values into output registers */
1908 emit_mask_scatter(bld, outputs_array, index_vec, value,
1909 &bld->exec_mask, pred);
1910 }
1911 else {
1912 LLVMValueRef out_ptr = lp_get_output_ptr(bld, reg->Register.Index,
1913 chan_index);
1914
1915 if (dtype == TGSI_TYPE_DOUBLE) {
1916 LLVMValueRef out_ptr2 = lp_get_output_ptr(bld, reg->Register.Index,
1917 chan_index + 1);
1918 emit_store_double_chan(bld_base, dtype, out_ptr, out_ptr2,
1919 pred, value);
1920 } else
1921 lp_exec_mask_store(&bld->exec_mask, float_bld, pred, value, out_ptr);
1922 }
1923 break;
1924
1925 case TGSI_FILE_TEMPORARY:
1926 /* Temporaries are always stored as floats */
1927 if (dtype != TGSI_TYPE_DOUBLE)
1928 value = LLVMBuildBitCast(builder, value, float_bld->vec_type, "");
1929 else
1930 value = LLVMBuildBitCast(builder, value, LLVMVectorType(LLVMFloatTypeInContext(gallivm->context), bld_base->base.type.length * 2), "");
1931
1932 if (reg->Register.Indirect) {
1933 LLVMValueRef index_vec; /* indexes into the temp registers */
1934 LLVMValueRef temps_array;
1935 LLVMTypeRef fptr_type;
1936
1937 index_vec = get_soa_array_offsets(&bld_base->uint_bld,
1938 indirect_index,
1939 chan_index,
1940 TRUE);
1941
1942 fptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1943 temps_array = LLVMBuildBitCast(builder, bld->temps_array, fptr_type, "");
1944
1945 /* Scatter store values into temp registers */
1946 emit_mask_scatter(bld, temps_array, index_vec, value,
1947 &bld->exec_mask, pred);
1948 }
1949 else {
1950 LLVMValueRef temp_ptr;
1951 temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, chan_index);
1952
1953 if (dtype == TGSI_TYPE_DOUBLE) {
1954 LLVMValueRef temp_ptr2 = lp_get_temp_ptr_soa(bld,
1955 reg->Register.Index,
1956 chan_index + 1);
1957 emit_store_double_chan(bld_base, dtype, temp_ptr, temp_ptr2,
1958 pred, value);
1959 }
1960 else
1961 lp_exec_mask_store(&bld->exec_mask, float_bld, pred, value, temp_ptr);
1962 }
1963 break;
1964
1965 case TGSI_FILE_ADDRESS:
1966 assert(dtype == TGSI_TYPE_SIGNED);
1967 assert(LLVMTypeOf(value) == int_bld->vec_type);
1968 value = LLVMBuildBitCast(builder, value, int_bld->vec_type, "");
1969 lp_exec_mask_store(&bld->exec_mask, int_bld, pred, value,
1970 bld->addr[reg->Register.Index][chan_index]);
1971 break;
1972
1973 case TGSI_FILE_PREDICATE:
1974 assert(LLVMTypeOf(value) == float_bld->vec_type);
1975 value = LLVMBuildBitCast(builder, value, float_bld->vec_type, "");
1976 lp_exec_mask_store(&bld->exec_mask, float_bld, pred, value,
1977 bld->preds[reg->Register.Index][chan_index]);
1978 break;
1979
1980 default:
1981 assert( 0 );
1982 }
1983
1984 (void)dtype;
1985 }
1986
1987 /*
1988 * Called at the beginning of the translation of each TGSI instruction, to
1989 * emit some debug code.
1990 */
1991 static void
1992 emit_debug(
1993 struct lp_build_tgsi_context * bld_base,
1994 const struct tgsi_full_instruction * inst,
1995 const struct tgsi_opcode_info * info)
1996
1997 {
1998 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1999
2000 if (DEBUG_EXECUTION) {
2001 /*
2002 * Dump the TGSI instruction.
2003 */
2004
2005 struct gallivm_state *gallivm = bld_base->base.gallivm;
2006 char buf[512];
2007 buf[0] = '$';
2008 buf[1] = ' ';
2009 tgsi_dump_instruction_str(inst, bld_base->pc, &buf[2], sizeof buf - 2);
2010 lp_build_printf(gallivm, buf);
2011
2012       /* Dump the execution mask. */
2014 if (bld->exec_mask.has_mask) {
2015 lp_build_print_value(gallivm, " mask = ", bld->exec_mask.exec_mask);
2016 }
2017 }
2018 }
2019
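/**
 * Store an instruction's result(s) into the destination register(s),
 * applying predication. For double results only the even channels are
 * visited; each store also covers the following odd channel.
 */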
2020 static void
2021 emit_store(
2022 struct lp_build_tgsi_context * bld_base,
2023 const struct tgsi_full_instruction * inst,
2024 const struct tgsi_opcode_info * info,
2025 LLVMValueRef dst[4])
2026
2027 {
2028 unsigned chan_index;
2029 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2030 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
2031 if(info->num_dst) {
2032 LLVMValueRef pred[TGSI_NUM_CHANNELS];
2033
2034 emit_fetch_predicate( bld, inst, pred );
2035
2036 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2037
2038 if (dtype == TGSI_TYPE_DOUBLE && (chan_index == 1 || chan_index == 3))
2039 continue;
2040 emit_store_chan(bld_base, inst, 0, chan_index, pred[chan_index], dst[chan_index]);
2041 }
2042 }
2043 }
2044
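/**
 * Translate a TGSI texture target into the corresponding pipe texture
 * target (shadow and MSAA variants map to the same pipe target).
 */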
2045 static unsigned
2046 tgsi_to_pipe_tex_target(unsigned tgsi_target)
2047 {
2048 switch (tgsi_target) {
2049 case TGSI_TEXTURE_BUFFER:
2050 return PIPE_BUFFER;
2051 case TGSI_TEXTURE_1D:
2052 case TGSI_TEXTURE_SHADOW1D:
2053 return PIPE_TEXTURE_1D;
2054 case TGSI_TEXTURE_2D:
2055 case TGSI_TEXTURE_SHADOW2D:
2056 case TGSI_TEXTURE_2D_MSAA:
2057 return PIPE_TEXTURE_2D;
2058 case TGSI_TEXTURE_3D:
2059 return PIPE_TEXTURE_3D;
2060 case TGSI_TEXTURE_CUBE:
2061 case TGSI_TEXTURE_SHADOWCUBE:
2062 return PIPE_TEXTURE_CUBE;
2063 case TGSI_TEXTURE_RECT:
2064 case TGSI_TEXTURE_SHADOWRECT:
2065 return PIPE_TEXTURE_RECT;
2066 case TGSI_TEXTURE_1D_ARRAY:
2067 case TGSI_TEXTURE_SHADOW1D_ARRAY:
2068 return PIPE_TEXTURE_1D_ARRAY;
2069 case TGSI_TEXTURE_2D_ARRAY:
2070 case TGSI_TEXTURE_SHADOW2D_ARRAY:
2071 case TGSI_TEXTURE_2D_ARRAY_MSAA:
2072 return PIPE_TEXTURE_2D_ARRAY;
2073 case TGSI_TEXTURE_CUBE_ARRAY:
2074 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
2075 return PIPE_TEXTURE_CUBE_ARRAY;
2076 default:
2077 assert(0);
2078 return PIPE_BUFFER;
2079 }
2080 }
2081
2082
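/**
 * Determine how the lod argument of a sampling instruction can vary
 * across the vector: scalar, per-quad or per-element.
 */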
2083 static enum lp_sampler_lod_property
2084 lp_build_lod_property(
2085 struct lp_build_tgsi_context *bld_base,
2086 const struct tgsi_full_instruction *inst,
2087 unsigned src_op)
2088 {
2089 const struct tgsi_full_src_register *reg = &inst->Src[src_op];
2090 enum lp_sampler_lod_property lod_property;
2091
2092 /*
2093     * Not much we can do here. We could try catching inputs declared
2094     * with constant interpolation, but it's probably not worth it: for
2095     * TEX opcodes as well as FETCH/LD the lod comes from the same reg as
2096     * the coords, so it could only work for SAMPLE/TXQ/SVIEWINFO (just
2097     * like the constant/immediate recognition below).
2098     * Recognizing temps holding broadcast scalars would be more valuable,
2099     * but there's no way to do it. Asking llvm (via LLVMIsConstant, even
2100     * though that isn't exactly what we'd need) was unsuccessful; even a
2101     * case as simple as
2102     * IMM[0] UINT32 (0,-1,0,0)
2103     * MOV TEMP[0] IMM[0].yyyy
2104     * SVIEWINFO TEMP[1], TEMP[0].xxxx, SVIEWINFO[0]
2105     * isn't recognized.
2106     * This means there's ZERO chance this will ever catch a scalar lod
2107     * with traditional tex opcodes or texel fetches, since the lod
2108     * comes from the same reg as the coords (except maybe some test
2109     * shaders using constant coords).
2110     * There's at least hope for sample opcodes as well as size queries.
2111 */
2112 if (reg->Register.File == TGSI_FILE_CONSTANT ||
2113 reg->Register.File == TGSI_FILE_IMMEDIATE) {
2114 lod_property = LP_SAMPLER_LOD_SCALAR;
2115 }
2116 else if (bld_base->info->processor == TGSI_PROCESSOR_FRAGMENT) {
2117 if (gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) {
2118 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2119 }
2120 else {
2121 lod_property = LP_SAMPLER_LOD_PER_QUAD;
2122 }
2123 }
2124 else {
2125       /* never use scalar (per-quad) lod; the results are just too wrong. */
2126 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2127 }
2128 return lod_property;
2129 }
2130
2131
2132 /**
2133 * High-level instruction translators.
2134 */
2135
2136 static void
2137 emit_tex( struct lp_build_tgsi_soa_context *bld,
2138 const struct tgsi_full_instruction *inst,
2139 enum lp_build_tex_modifier modifier,
2140 LLVMValueRef *texel,
2141 unsigned sampler_reg,
2142 enum lp_sampler_op_type sampler_op)
2143 {
2144 unsigned unit = inst->Src[sampler_reg].Register.Index;
2145 LLVMValueRef oow = NULL;
2146 LLVMValueRef lod = NULL;
2147 LLVMValueRef coords[5];
2148 LLVMValueRef offsets[3] = { NULL };
2149 struct lp_derivatives derivs;
2150 struct lp_sampler_params params;
2151 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2152 unsigned num_derivs, num_offsets, i;
2153 unsigned shadow_coord = 0;
2154 unsigned layer_coord = 0;
2155 unsigned sample_key = sampler_op << LP_SAMPLER_OP_TYPE_SHIFT;
2156
2157 memset(&params, 0, sizeof(params));
2158
2159 if (!bld->sampler) {
2160 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2161 for (i = 0; i < 4; i++) {
2162 texel[i] = bld->bld_base.base.undef;
2163 }
2164 return;
2165 }
2166
2167 switch (inst->Texture.Texture) {
2168 case TGSI_TEXTURE_1D_ARRAY:
2169 layer_coord = 1;
2170 /* fallthrough */
2171 case TGSI_TEXTURE_1D:
2172 num_offsets = 1;
2173 num_derivs = 1;
2174 break;
2175 case TGSI_TEXTURE_2D_ARRAY:
2176 layer_coord = 2;
2177 /* fallthrough */
2178 case TGSI_TEXTURE_2D:
2179 case TGSI_TEXTURE_RECT:
2180 num_offsets = 2;
2181 num_derivs = 2;
2182 break;
2183 case TGSI_TEXTURE_SHADOW1D_ARRAY:
2184 layer_coord = 1;
2185 /* fallthrough */
2186 case TGSI_TEXTURE_SHADOW1D:
2187 shadow_coord = 2;
2188 num_offsets = 1;
2189 num_derivs = 1;
2190 break;
2191 case TGSI_TEXTURE_SHADOW2D_ARRAY:
2192 layer_coord = 2;
2193 shadow_coord = 3;
2194 num_offsets = 2;
2195 num_derivs = 2;
2196 break;
2197 case TGSI_TEXTURE_SHADOW2D:
2198 case TGSI_TEXTURE_SHADOWRECT:
2199 shadow_coord = 2;
2200 num_offsets = 2;
2201 num_derivs = 2;
2202 break;
2203 case TGSI_TEXTURE_CUBE:
2204 num_offsets = 2;
2205 num_derivs = 3;
2206 break;
2207 case TGSI_TEXTURE_3D:
2208 num_offsets = 3;
2209 num_derivs = 3;
2210 break;
2211 case TGSI_TEXTURE_SHADOWCUBE:
2212 shadow_coord = 3;
2213 num_offsets = 2;
2214 num_derivs = 3;
2215 break;
2216 case TGSI_TEXTURE_CUBE_ARRAY:
2217 num_offsets = 2;
2218 num_derivs = 3;
2219 layer_coord = 3;
2220 break;
2221 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
2222 num_offsets = 2;
2223 num_derivs = 3;
2224 layer_coord = 3;
2225       shadow_coord = 4; /* shadow coord comes from a separate reg */
2226 break;
2227 case TGSI_TEXTURE_2D_MSAA:
2228 case TGSI_TEXTURE_2D_ARRAY_MSAA:
2229 default:
2230 assert(0);
2231 return;
2232 }
2233
2234    /* Note that lod and especially projected are illegal in a LOT of cases. */
2235 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS ||
2236 modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
2237 if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
2238 inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY) {
2239 /* note that shadow cube array with bias/explicit lod does not exist */
2240 lod = lp_build_emit_fetch(&bld->bld_base, inst, 1, 0);
2241 }
2242 else {
2243 lod = lp_build_emit_fetch(&bld->bld_base, inst, 0, 3);
2244 }
2245 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
2246 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
2247 }
2248 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
2249 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2250 }
2251 lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
2252 }
2253
2254 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
2255 oow = lp_build_emit_fetch(&bld->bld_base, inst, 0, 3);
2256 oow = lp_build_rcp(&bld->bld_base.base, oow);
2257 }
2258
2259 for (i = 0; i < num_derivs; i++) {
2260 coords[i] = lp_build_emit_fetch(&bld->bld_base, inst, 0, i);
2261 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
2262 coords[i] = lp_build_mul(&bld->bld_base.base, coords[i], oow);
2263 }
2264 for (i = num_derivs; i < 5; i++) {
2265 coords[i] = bld->bld_base.base.undef;
2266 }
2267
2268 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2269 if (layer_coord) {
2270 if (layer_coord == 3) {
2271 coords[3] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
2272 }
2273 else {
2274 coords[2] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
2275 }
2276 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
2277 coords[2] = lp_build_mul(&bld->bld_base.base, coords[2], oow);
2278 }
2279    /* Shadow coord always occupies the 5th slot. */
2280 if (shadow_coord) {
2281 sample_key |= LP_SAMPLER_SHADOW;
2282 if (shadow_coord == 4) {
2283 coords[4] = lp_build_emit_fetch(&bld->bld_base, inst, 1, 0);
2284 }
2285 else {
2286 coords[4] = lp_build_emit_fetch(&bld->bld_base, inst, 0, shadow_coord);
2287 }
2288 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
2289 coords[4] = lp_build_mul(&bld->bld_base.base, coords[4], oow);
2290 }
2291
2292 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
2293 unsigned dim;
2294 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
2295 for (dim = 0; dim < num_derivs; ++dim) {
2296 derivs.ddx[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 1, dim);
2297 derivs.ddy[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 2, dim);
2298 }
2299 params.derivs = &derivs;
2300 /*
2301        * We could also check whether all src regs are constant, but I doubt
2302        * such cases exist in practice.
2303 */
2304 if (bld->bld_base.info->processor == TGSI_PROCESSOR_FRAGMENT) {
2305 if (gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) {
2306 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2307 }
2308 else {
2309 lod_property = LP_SAMPLER_LOD_PER_QUAD;
2310 }
2311 }
2312 else {
2313 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2314 }
2315 }
2316 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2317
2318 /* we don't handle the 4 offset version of tg4 */
2319 if (inst->Texture.NumOffsets == 1) {
2320 unsigned dim;
2321 sample_key |= LP_SAMPLER_OFFSETS;
2322 for (dim = 0; dim < num_offsets; dim++) {
2323 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim);
2324 }
2325 }
2326
2327 params.type = bld->bld_base.base.type;
2328 params.sample_key = sample_key;
2329 params.texture_index = unit;
2330 params.sampler_index = unit;
2331 params.context_ptr = bld->context_ptr;
2332 params.thread_data_ptr = bld->thread_data_ptr;
2333 params.coords = coords;
2334 params.offsets = offsets;
2335 params.lod = lod;
2336 params.texel = texel;
2337
2338 bld->sampler->emit_tex_sample(bld->sampler,
2339 bld->bld_base.base.gallivm,
2340 &params);
2341 }
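
/*
 * Illustrative example of the sample_key packing above (hypothetical
 * instruction, not tied to any particular shader): a TXB2 on a
 * TGSI_TEXTURE_SHADOWCUBE reaches this function with sampler_reg = 2
 * and ends up with
 *
 *    sample_key = (LP_SAMPLER_OP_TEXTURE << LP_SAMPLER_OP_TYPE_SHIFT) |
 *                 LP_SAMPLER_SHADOW |
 *                 (LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT) |
 *                 (lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT);
 *
 * plus LP_SAMPLER_OFFSETS if the instruction carries a texel offset.
 */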
2342
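/**
 * Translate the SM4-style SAMPLE* opcodes, where the texture and sampler
 * units come from separate source operands and the target is taken from
 * the declared sampler view.
 */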
2343 static void
2344 emit_sample(struct lp_build_tgsi_soa_context *bld,
2345 const struct tgsi_full_instruction *inst,
2346 enum lp_build_tex_modifier modifier,
2347 boolean compare,
2348 LLVMValueRef *texel)
2349 {
2350 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
2351 unsigned texture_unit, sampler_unit;
2352 LLVMValueRef lod = NULL;
2353 LLVMValueRef coords[5];
2354 LLVMValueRef offsets[3] = { NULL };
2355 struct lp_derivatives derivs;
2356 struct lp_sampler_params params;
2357 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2358
2359 unsigned num_offsets, num_derivs, i;
2360 unsigned layer_coord = 0;
2361 unsigned sample_key = LP_SAMPLER_OP_TEXTURE << LP_SAMPLER_OP_TYPE_SHIFT;
2362
2363 memset(&params, 0, sizeof(params));
2364
2365 if (!bld->sampler) {
2366 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2367 for (i = 0; i < 4; i++) {
2368 texel[i] = bld->bld_base.base.undef;
2369 }
2370 return;
2371 }
2372
2373 /*
2374     * Unlike old-style tex opcodes, the texture/sampler indices
2375 * always come from src1 and src2 respectively.
2376 */
2377 texture_unit = inst->Src[1].Register.Index;
2378 sampler_unit = inst->Src[2].Register.Index;
2379
2380 /*
2381     * Note that inst->Texture.Texture will contain the number of offsets;
2382     * however, the target information is NOT there and comes from the
2383 * declared sampler views instead.
2384 */
2385 switch (bld->sv[texture_unit].Resource) {
2386 case TGSI_TEXTURE_1D:
2387 num_offsets = 1;
2388 num_derivs = 1;
2389 break;
2390 case TGSI_TEXTURE_1D_ARRAY:
2391 layer_coord = 1;
2392 num_offsets = 1;
2393 num_derivs = 1;
2394 break;
2395 case TGSI_TEXTURE_2D:
2396 case TGSI_TEXTURE_RECT:
2397 num_offsets = 2;
2398 num_derivs = 2;
2399 break;
2400 case TGSI_TEXTURE_2D_ARRAY:
2401 layer_coord = 2;
2402 num_offsets = 2;
2403 num_derivs = 2;
2404 break;
2405 case TGSI_TEXTURE_CUBE:
2406 num_offsets = 2;
2407 num_derivs = 3;
2408 break;
2409 case TGSI_TEXTURE_3D:
2410 num_offsets = 3;
2411 num_derivs = 3;
2412 break;
2413 case TGSI_TEXTURE_CUBE_ARRAY:
2414 layer_coord = 3;
2415 num_offsets = 2;
2416 num_derivs = 3;
2417 break;
2418 default:
2419 assert(0);
2420 return;
2421 }
2422
2423 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS ||
2424 modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
2425 lod = lp_build_emit_fetch(&bld->bld_base, inst, 3, 0);
2426 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
2427 sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
2428 }
2429 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
2430 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2431 }
2432 lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
2433 }
2434 else if (modifier == LP_BLD_TEX_MODIFIER_LOD_ZERO) {
2435 /* XXX might be better to explicitly pass the level zero information */
2436 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2437 lod = lp_build_const_vec(gallivm, bld->bld_base.base.type, 0.0F);
2438 }
2439
2440 for (i = 0; i < num_derivs; i++) {
2441 coords[i] = lp_build_emit_fetch(&bld->bld_base, inst, 0, i);
2442 }
2443 for (i = num_derivs; i < 5; i++) {
2444 coords[i] = bld->bld_base.base.undef;
2445 }
2446
2447 /* Layer coord always goes into 3rd slot, except for cube map arrays */
2448 if (layer_coord) {
2449 if (layer_coord == 3)
2450 coords[3] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
2451 else
2452 coords[2] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
2453 }
2454    /* Shadow coord always occupies the 5th slot. */
2455 if (compare) {
2456 sample_key |= LP_SAMPLER_SHADOW;
2457 coords[4] = lp_build_emit_fetch(&bld->bld_base, inst, 3, 0);
2458 }
2459
2460 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
2461 unsigned dim;
2462 sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;
2463 for (dim = 0; dim < num_derivs; ++dim) {
2464 derivs.ddx[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 3, dim);
2465 derivs.ddy[dim] = lp_build_emit_fetch(&bld->bld_base, inst, 4, dim);
2466 }
2467 params.derivs = &derivs;
2468 /*
2469        * We could also check whether all src regs are constant, but I doubt
2470        * such cases exist in practice.
2471 */
2472 if (bld->bld_base.info->processor == TGSI_PROCESSOR_FRAGMENT) {
2473 if (gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) {
2474 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2475 }
2476 else {
2477 lod_property = LP_SAMPLER_LOD_PER_QUAD;
2478 }
2479 }
2480 else {
2481 lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
2482 }
2483 }
2484
2485 /* some advanced gather instructions (txgo) would require 4 offsets */
2486 if (inst->Texture.NumOffsets == 1) {
2487 unsigned dim;
2488 sample_key |= LP_SAMPLER_OFFSETS;
2489 for (dim = 0; dim < num_offsets; dim++) {
2490 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim);
2491 }
2492 }
2493 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2494
2495 params.type = bld->bld_base.base.type;
2496 params.sample_key = sample_key;
2497 params.texture_index = texture_unit;
2498 params.sampler_index = sampler_unit;
2499 params.context_ptr = bld->context_ptr;
2500 params.thread_data_ptr = bld->thread_data_ptr;
2501 params.coords = coords;
2502 params.offsets = offsets;
2503 params.lod = lod;
2504 params.texel = texel;
2505
2506 bld->sampler->emit_tex_sample(bld->sampler,
2507 bld->bld_base.base.gallivm,
2508 &params);
2509
2510 if (inst->Src[1].Register.SwizzleX != PIPE_SWIZZLE_RED ||
2511 inst->Src[1].Register.SwizzleY != PIPE_SWIZZLE_GREEN ||
2512 inst->Src[1].Register.SwizzleZ != PIPE_SWIZZLE_BLUE ||
2513 inst->Src[1].Register.SwizzleW != PIPE_SWIZZLE_ALPHA) {
2514 unsigned char swizzles[4];
2515 swizzles[0] = inst->Src[1].Register.SwizzleX;
2516 swizzles[1] = inst->Src[1].Register.SwizzleY;
2517 swizzles[2] = inst->Src[1].Register.SwizzleZ;
2518 swizzles[3] = inst->Src[1].Register.SwizzleW;
2519
2520 lp_build_swizzle_soa_inplace(&bld->bld_base.base, texel, swizzles);
2521 }
2522 }
2523
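/**
 * Translate unfiltered texel fetches: TXF, and SAMPLE_I-style opcodes
 * when is_samplei is TRUE.
 */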
2524 static void
2525 emit_fetch_texels( struct lp_build_tgsi_soa_context *bld,
2526 const struct tgsi_full_instruction *inst,
2527 LLVMValueRef *texel,
2528 boolean is_samplei)
2529 {
2530 unsigned unit, target;
2531 LLVMValueRef coord_undef = LLVMGetUndef(bld->bld_base.base.int_vec_type);
2532 LLVMValueRef explicit_lod = NULL;
2533 LLVMValueRef coords[5];
2534 LLVMValueRef offsets[3] = { NULL };
2535 struct lp_sampler_params params;
2536 enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
2537 unsigned dims, i;
2538 unsigned layer_coord = 0;
2539 unsigned sample_key = LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
2540
2541 memset(&params, 0, sizeof(params));
2542
2543 if (!bld->sampler) {
2544 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
2545 for (i = 0; i < 4; i++) {
2546 texel[i] = coord_undef;
2547 }
2548 return;
2549 }
2550
2551 unit = inst->Src[1].Register.Index;
2552
2553 if (is_samplei) {
2554 target = bld->sv[unit].Resource;
2555 }
2556 else {
2557 target = inst->Texture.Texture;
2558 }
2559
2560 switch (target) {
2561 case TGSI_TEXTURE_1D:
2562 case TGSI_TEXTURE_BUFFER:
2563 dims = 1;
2564 break;
2565 case TGSI_TEXTURE_1D_ARRAY:
2566 layer_coord = 1;
2567 dims = 1;
2568 break;
2569 case TGSI_TEXTURE_2D:
2570 case TGSI_TEXTURE_RECT:
2571 case TGSI_TEXTURE_2D_MSAA:
2572 dims = 2;
2573 break;
2574 case TGSI_TEXTURE_2D_ARRAY:
2575 case TGSI_TEXTURE_2D_ARRAY_MSAA:
2576 layer_coord = 2;
2577 dims = 2;
2578 break;
2579 case TGSI_TEXTURE_3D:
2580 dims = 3;
2581 break;
2582 default:
2583 assert(0);
2584 return;
2585 }
2586
2587    /* We always have an explicit lod, except for buffers and msaa targets. */
2588 if (target != TGSI_TEXTURE_BUFFER &&
2589 target != TGSI_TEXTURE_2D_MSAA &&
2590 target != TGSI_TEXTURE_2D_ARRAY_MSAA) {
2591 sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
2592 explicit_lod = lp_build_emit_fetch(&bld->bld_base, inst, 0, 3);
2593 lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
2594 }
2595 /*
2596 * XXX: for real msaa support, the w component (or src2.x for sample_i_ms)
2597 * would be the sample index.
2598 */
2599
2600 for (i = 0; i < dims; i++) {
2601 coords[i] = lp_build_emit_fetch(&bld->bld_base, inst, 0, i);
2602 }
2603    /* never use more than 3 coords here, but emit_fetch_texels copies all 5 anyway */
2604 for (i = dims; i < 5; i++) {
2605 coords[i] = coord_undef;
2606 }
2607 if (layer_coord)
2608 coords[2] = lp_build_emit_fetch(&bld->bld_base, inst, 0, layer_coord);
2609
2610 if (inst->Texture.NumOffsets == 1) {
2611 unsigned dim;
2612 sample_key |= LP_SAMPLER_OFFSETS;
2613 for (dim = 0; dim < dims; dim++) {
2614 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim);
2615 }
2616 }
2617 sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;
2618
2619 params.type = bld->bld_base.base.type;
2620 params.sample_key = sample_key;
2621 params.texture_index = unit;
2622 /*
2623     * The sampler is not actually used; set it to 0 so it won't exceed
2624     * PIPE_MAX_SAMPLERS and trigger assertions with d3d10, where the
2625     * sampler view number can exceed this limit.
2626 */
2627 params.sampler_index = 0;
2628 params.context_ptr = bld->context_ptr;
2629 params.thread_data_ptr = bld->thread_data_ptr;
2630 params.coords = coords;
2631 params.offsets = offsets;
2632 params.derivs = NULL;
2633 params.lod = explicit_lod;
2634 params.texel = texel;
2635
2636 bld->sampler->emit_tex_sample(bld->sampler,
2637 bld->bld_base.base.gallivm,
2638 &params);
2639
2640 if (is_samplei &&
2641 (inst->Src[1].Register.SwizzleX != PIPE_SWIZZLE_RED ||
2642 inst->Src[1].Register.SwizzleY != PIPE_SWIZZLE_GREEN ||
2643 inst->Src[1].Register.SwizzleZ != PIPE_SWIZZLE_BLUE ||
2644 inst->Src[1].Register.SwizzleW != PIPE_SWIZZLE_ALPHA)) {
2645 unsigned char swizzles[4];
2646 swizzles[0] = inst->Src[1].Register.SwizzleX;
2647 swizzles[1] = inst->Src[1].Register.SwizzleY;
2648 swizzles[2] = inst->Src[1].Register.SwizzleZ;
2649 swizzles[3] = inst->Src[1].Register.SwizzleW;
2650
2651 lp_build_swizzle_soa_inplace(&bld->bld_base.base, texel, swizzles);
2652 }
2653 }
2654
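/**
 * Translate texture size queries: TXQ, and SVIEWINFO when is_sviewinfo
 * is TRUE.
 */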
2655 static void
2656 emit_size_query( struct lp_build_tgsi_soa_context *bld,
2657 const struct tgsi_full_instruction *inst,
2658 LLVMValueRef *sizes_out,
2659 boolean is_sviewinfo)
2660 {
2661 LLVMValueRef explicit_lod;
2662 enum lp_sampler_lod_property lod_property;
2663 unsigned has_lod;
2664 unsigned i;
2665 unsigned unit = inst->Src[1].Register.Index;
2666 unsigned target, pipe_target;
2667
2668 if (is_sviewinfo) {
2669 target = bld->sv[unit].Resource;
2670 }
2671 else {
2672 target = inst->Texture.Texture;
2673 }
2674 switch (target) {
2675 case TGSI_TEXTURE_BUFFER:
2676 case TGSI_TEXTURE_RECT:
2677 case TGSI_TEXTURE_SHADOWRECT:
2678 has_lod = 0;
2679 break;
2680 default:
2681 has_lod = 1;
2682 break;
2683 }
2684
2685 if (!bld->sampler) {
2686 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
2687 for (i = 0; i < 4; i++)
2688 sizes_out[i] = bld->bld_base.int_bld.undef;
2689 return;
2690 }
2691
2692 if (has_lod) {
2693 explicit_lod = lp_build_emit_fetch(&bld->bld_base, inst, 0, 0);
2694 lod_property = lp_build_lod_property(&bld->bld_base, inst, 0);
2695 }
2696 else {
2697 explicit_lod = NULL;
2698 lod_property = LP_SAMPLER_LOD_SCALAR;
2699 }
2700
2701
2702 pipe_target = tgsi_to_pipe_tex_target(target);
2703
2704 bld->sampler->emit_size_query(bld->sampler,
2705 bld->bld_base.base.gallivm,
2706 bld->bld_base.int_bld.type,
2707 unit, pipe_target,
2708 bld->context_ptr,
2709 TRUE,
2710 lod_property,
2711 explicit_lod,
2712 sizes_out);
2713 }
2714
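/**
 * Heuristic for kill instructions: return FALSE if any texturing or
 * control-flow opcode follows within the next few instructions (so a
 * mask check is worthwhile), TRUE otherwise.
 */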
2715 static boolean
2716 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
2717 int pc)
2718 {
2719 unsigned i;
2720
2721 for (i = 0; i < 5; i++) {
2722 unsigned opcode;
2723
2724 if (pc + i >= bld->bld_base.info->num_instructions)
2725 return TRUE;
2726
2727 opcode = bld->bld_base.instructions[pc + i].Instruction.Opcode;
2728
2729 if (opcode == TGSI_OPCODE_END)
2730 return TRUE;
2731
2732 if (opcode == TGSI_OPCODE_TEX ||
2733 opcode == TGSI_OPCODE_TXP ||
2734 opcode == TGSI_OPCODE_TXD ||
2735 opcode == TGSI_OPCODE_TXB ||
2736 opcode == TGSI_OPCODE_TXL ||
2737 opcode == TGSI_OPCODE_TXF ||
2738 opcode == TGSI_OPCODE_TXQ ||
2739 opcode == TGSI_OPCODE_TEX2 ||
2740 opcode == TGSI_OPCODE_TXB2 ||
2741 opcode == TGSI_OPCODE_TXL2 ||
2742 opcode == TGSI_OPCODE_SAMPLE ||
2743 opcode == TGSI_OPCODE_SAMPLE_B ||
2744 opcode == TGSI_OPCODE_SAMPLE_C ||
2745 opcode == TGSI_OPCODE_SAMPLE_C_LZ ||
2746 opcode == TGSI_OPCODE_SAMPLE_D ||
2747 opcode == TGSI_OPCODE_SAMPLE_I ||
2748 opcode == TGSI_OPCODE_SAMPLE_I_MS ||
2749 opcode == TGSI_OPCODE_SAMPLE_L ||
2750 opcode == TGSI_OPCODE_SVIEWINFO ||
2751 opcode == TGSI_OPCODE_CAL ||
2752 opcode == TGSI_OPCODE_CALLNZ ||
2753 opcode == TGSI_OPCODE_IF ||
2754 opcode == TGSI_OPCODE_UIF ||
2755 opcode == TGSI_OPCODE_BGNLOOP ||
2756 opcode == TGSI_OPCODE_SWITCH)
2757 return FALSE;
2758 }
2759
2760 return TRUE;
2761 }
2762
2763
2764
2765 /**
2766 * Kill fragment if any of the src register values are negative.
2767 */
2768 static void
2769 emit_kill_if(
2770 struct lp_build_tgsi_soa_context *bld,
2771 const struct tgsi_full_instruction *inst,
2772 int pc)
2773 {
2774 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2775 const struct tgsi_full_src_register *reg = &inst->Src[0];
2776 LLVMValueRef terms[TGSI_NUM_CHANNELS];
2777 LLVMValueRef mask;
2778 unsigned chan_index;
2779
2780 memset(&terms, 0, sizeof terms);
2781
2782 TGSI_FOR_EACH_CHANNEL( chan_index ) {
2783 unsigned swizzle;
2784
2785 /* Unswizzle channel */
2786 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
2787
2788 /* Check if the component has not been already tested. */
2789 assert(swizzle < TGSI_NUM_CHANNELS);
2790 if( !terms[swizzle] )
2791 /* TODO: change the comparison operator instead of setting the sign */
2792 terms[swizzle] = lp_build_emit_fetch(&bld->bld_base, inst, 0, chan_index );
2793 }
2794
2795 mask = NULL;
2796 TGSI_FOR_EACH_CHANNEL( chan_index ) {
2797 if(terms[chan_index]) {
2798 LLVMValueRef chan_mask;
2799
2800 /*
2801 * If term < 0 then mask = 0 else mask = ~0.
2802 */
2803 chan_mask = lp_build_cmp(&bld->bld_base.base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->bld_base.base.zero);
2804
2805 if(mask)
2806 mask = LLVMBuildAnd(builder, mask, chan_mask, "");
2807 else
2808 mask = chan_mask;
2809 }
2810 }
2811
2812 if (bld->exec_mask.has_mask) {
2813 LLVMValueRef invmask;
2814 invmask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
2815 mask = LLVMBuildOr(builder, mask, invmask, "");
2816 }
2817
2818 lp_build_mask_update(bld->mask, mask);
2819 if (!near_end_of_shader(bld, pc))
2820 lp_build_mask_check(bld->mask);
2821 }
2822
2823
2824 /**
2825 * Unconditional fragment kill.
2826  * The only predication is the execution mask, which will apply if
2827 * we're inside a loop or conditional.
2828 */
2829 static void
2830 emit_kill(struct lp_build_tgsi_soa_context *bld,
2831 int pc)
2832 {
2833 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2834 LLVMValueRef mask;
2835
2836 /* For those channels which are "alive", disable fragment shader
2837 * execution.
2838 */
2839 if (bld->exec_mask.has_mask) {
2840 mask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
2841 }
2842 else {
2843 LLVMValueRef zero = LLVMConstNull(bld->bld_base.base.int_vec_type);
2844 mask = zero;
2845 }
2846
2847 lp_build_mask_update(bld->mask, mask);
2848
2849 if (!near_end_of_shader(bld, pc))
2850 lp_build_mask_check(bld->mask);
2851 }
2852
2853
2854 /**
2855  * Emit code which will dump the values of the given register file
2856 * to stdout.
2857 */
2858 static void
2859 emit_dump_file(struct lp_build_tgsi_soa_context *bld,
2860 unsigned file)
2861 {
2862 const struct tgsi_shader_info *info = bld->bld_base.info;
2863 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
2864 LLVMBuilderRef builder = gallivm->builder;
2865 LLVMValueRef reg_ptr;
2866 int index;
2867 int max_index = info->file_max[file];
2868
2869 /*
2870 * Some register files, particularly constants, can be very large,
2871 * and dumping everything could make this unusably slow.
2872 */
2873 max_index = MIN2(max_index, 32);
2874
2875 for (index = 0; index <= max_index; index++) {
2876 LLVMValueRef res;
2877 unsigned mask;
2878 int chan;
2879
2880 if (index < 8 * sizeof(unsigned) &&
2881 (info->file_mask[file] & (1 << index)) == 0) {
2882          /* This was not declared. */
2883 continue;
2884 }
2885
2886 if (file == TGSI_FILE_INPUT) {
2887 mask = info->input_usage_mask[index];
2888 } else {
2889 mask = TGSI_WRITEMASK_XYZW;
2890 }
2891
2892 for (chan = 0; chan < 4; chan++) {
2893 if ((mask & (1 << chan)) == 0) {
2894             /* This channel is not used. */
2895 continue;
2896 }
2897
2898 if (file == TGSI_FILE_CONSTANT) {
2899 struct tgsi_full_src_register reg;
2900 memset(&reg, 0, sizeof reg);
2901 reg.Register.File = file;
2902 reg.Register.Index = index;
2903 reg.Register.SwizzleX = 0;
2904 reg.Register.SwizzleY = 1;
2905 reg.Register.SwizzleZ = 2;
2906 reg.Register.SwizzleW = 3;
2907
2908 res = bld->bld_base.emit_fetch_funcs[file](&bld->bld_base, &reg, TGSI_TYPE_FLOAT, chan);
2909 if (!res) {
2910 continue;
2911 }
2912 } else if (file == TGSI_FILE_INPUT) {
2913 res = bld->inputs[index][chan];
2914 if (!res) {
2915 continue;
2916 }
2917 } else if (file == TGSI_FILE_TEMPORARY) {
2918 reg_ptr = lp_get_temp_ptr_soa(bld, index, chan);
2919 assert(reg_ptr);
2920 res = LLVMBuildLoad(builder, reg_ptr, "");
2921 } else if (file == TGSI_FILE_OUTPUT) {
2922 reg_ptr = lp_get_output_ptr(bld, index, chan);
2923 assert(reg_ptr);
2924 res = LLVMBuildLoad(builder, reg_ptr, "");
2925 } else {
2926 assert(0);
2927 continue;
2928 }
2929
2930 emit_dump_reg(gallivm, file, index, chan, res);
2931 }
2932 }
2933 }
2934
2935
2936
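/**
 * Handle a TGSI declaration: allocate per-channel register storage, or
 * record other per-file state such as sampler view targets and constant
 * buffer pointers.
 */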
2937 void
2938 lp_emit_declaration_soa(
2939 struct lp_build_tgsi_context *bld_base,
2940 const struct tgsi_full_declaration *decl)
2941 {
2942 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
2943 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
2944 LLVMTypeRef vec_type = bld->bld_base.base.vec_type;
2945 const unsigned first = decl->Range.First;
2946 const unsigned last = decl->Range.Last;
2947 unsigned idx, i;
2948
2949 assert(last <= bld->bld_base.info->file_max[decl->Declaration.File]);
2950
2951 switch (decl->Declaration.File) {
2952 case TGSI_FILE_TEMPORARY:
2953 if (!(bld->indirect_files & (1 << TGSI_FILE_TEMPORARY))) {
2954 assert(last < LP_MAX_INLINED_TEMPS);
2955 for (idx = first; idx <= last; ++idx) {
2956 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
2957 bld->temps[idx][i] = lp_build_alloca(gallivm, vec_type, "temp");
2958 }
2959 }
2960 break;
2961
2962 case TGSI_FILE_OUTPUT:
2963 if (!(bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
2964 for (idx = first; idx <= last; ++idx) {
2965 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
2966 bld->outputs[idx][i] = lp_build_alloca(gallivm,
2967 vec_type, "output");
2968 }
2969 }
2970 break;
2971
2972 case TGSI_FILE_ADDRESS:
2973 /* ADDR registers are only allocated with an integer LLVM IR type,
2974       * as they are guaranteed to always hold integers.
2975 * XXX: Not sure if this exception is worthwhile (or the whole idea of
2976 * an ADDR register for that matter).
2977 */
2978 assert(last < LP_MAX_TGSI_ADDRS);
2979 for (idx = first; idx <= last; ++idx) {
2980 assert(idx < LP_MAX_TGSI_ADDRS);
2981 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
2982 bld->addr[idx][i] = lp_build_alloca(gallivm, bld_base->base.int_vec_type, "addr");
2983 }
2984 break;
2985
2986 case TGSI_FILE_PREDICATE:
2987 assert(last < LP_MAX_TGSI_PREDS);
2988 for (idx = first; idx <= last; ++idx) {
2989 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
2990 bld->preds[idx][i] = lp_build_alloca(gallivm, vec_type,
2991 "predicate");
2992 }
2993 break;
2994
2995 case TGSI_FILE_SAMPLER_VIEW:
2996 /*
2997       * The target stored here MUST match whatever is actually set
2998       * in the sampler views (what about the return type?).
2999 */
3000 assert(last < PIPE_MAX_SHADER_SAMPLER_VIEWS);
3001 for (idx = first; idx <= last; ++idx) {
3002 bld->sv[idx] = decl->SamplerView;
3003 }
3004 break;
3005
3006 case TGSI_FILE_CONSTANT:
3007 {
3008 /*
3009 * We could trivially fetch the per-buffer pointer when fetching the
3010 * constant, relying on llvm to figure out it's always the same pointer
3011       * anyway. However, doing so results in a huge (more than a factor of 10)
3012       * slowdown in llvm compilation times for some (but not all) shaders
3013       * (more specifically, the IR optimization spends way more time in
3014       * DominatorTree::dominates), at least with llvm versions 3.1 and 3.3.
3015 */
3016 unsigned idx2D = decl->Dim.Index2D;
3017 LLVMValueRef index2D = lp_build_const_int32(gallivm, idx2D);
3018 assert(idx2D < LP_MAX_TGSI_CONST_BUFFERS);
3019 bld->consts[idx2D] =
3020 lp_build_array_get(gallivm, bld->consts_ptr, index2D);
3021 bld->consts_sizes[idx2D] =
3022 lp_build_array_get(gallivm, bld->const_sizes_ptr, index2D);
3023 }
3024 break;
3025
3026 default:
3027 /* don't need to declare other vars */
3028 break;
3029 }
3030 }
3031
3032
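/**
 * Handle a TGSI immediate: build constant vectors for its components
 * and record them, inline and/or in the immediates array when they are
 * accessed indirectly.
 */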
3033 void lp_emit_immediate_soa(
3034 struct lp_build_tgsi_context *bld_base,
3035 const struct tgsi_full_immediate *imm)
3036 {
3037 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
3038 struct gallivm_state * gallivm = bld_base->base.gallivm;
3039 LLVMValueRef imms[4];
3040 unsigned i;
3041 const uint size = imm->Immediate.NrTokens - 1;
3042 assert(size <= 4);
3043 switch (imm->Immediate.DataType) {
3044 case TGSI_IMM_FLOAT32:
3045 for( i = 0; i < size; ++i )
3046 imms[i] =
3047 lp_build_const_vec(gallivm, bld_base->base.type, imm->u[i].Float);
3048
3049 break;
3050 case TGSI_IMM_FLOAT64:
3051 case TGSI_IMM_UINT32:
3052 for( i = 0; i < size; ++i ) {
3053 LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->uint_bld.type, imm->u[i].Uint);
3054 imms[i] = LLVMConstBitCast(tmp, bld_base->base.vec_type);
3055 }
3056
3057 break;
3058 case TGSI_IMM_INT32:
3059 for( i = 0; i < size; ++i ) {
3060 LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->int_bld.type, imm->u[i].Int);
3061 imms[i] = LLVMConstBitCast(tmp, bld_base->base.vec_type);
3062 }
3063
3064 break;
3065 }
3066 for( i = size; i < 4; ++i )
3067 imms[i] = bld_base->base.undef;
3068
3069 if (bld->use_immediates_array) {
3070 unsigned index = bld->num_immediates;
3071 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
3072 LLVMBuilderRef builder = gallivm->builder;
3073
3074 assert(bld->indirect_files & (1 << TGSI_FILE_IMMEDIATE));
3075 for (i = 0; i < 4; ++i ) {
3076 LLVMValueRef lindex = lp_build_const_int32(
3077 bld->bld_base.base.gallivm, index * 4 + i);
3078 LLVMValueRef imm_ptr = LLVMBuildGEP(builder,
3079 bld->imms_array, &lindex, 1, "");
3080 LLVMBuildStore(builder, imms[i], imm_ptr);
3081 }
3082 } else {
3083 /* simply copy the immediate values into the next immediates[] slot */
3084 unsigned i;
3085 assert(imm->Immediate.NrTokens - 1 <= 4);
3086 assert(bld->num_immediates < LP_MAX_INLINED_IMMEDIATES);
3087
3088 for(i = 0; i < 4; ++i )
3089 bld->immediates[bld->num_immediates][i] = imms[i];
3090
3091 if (bld->indirect_files & (1 << TGSI_FILE_IMMEDIATE)) {
3092 unsigned index = bld->num_immediates;
3093 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
3094 LLVMBuilderRef builder = gallivm->builder;
3095 for (i = 0; i < 4; ++i ) {
3096 LLVMValueRef lindex = lp_build_const_int32(
3097 bld->bld_base.base.gallivm, index * 4 + i);
3098 LLVMValueRef imm_ptr = LLVMBuildGEP(builder,
3099 bld->imms_array, &lindex, 1, "");
3100 LLVMBuildStore(builder,
3101 bld->immediates[index][i],
3102 imm_ptr);
3103 }
3104 }
3105 }
3106
3107 bld->num_immediates++;
3108 }
3109
3110 static void
3111 ddx_emit(
3112 const struct lp_build_tgsi_action * action,
3113 struct lp_build_tgsi_context * bld_base,
3114 struct lp_build_emit_data * emit_data)
3115 {
3116 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3117
3118 emit_fetch_deriv(bld, emit_data->args[0], NULL,
3119 &emit_data->output[emit_data->chan], NULL);
3120 }
3121
3122 static void
3123 ddy_emit(
3124 const struct lp_build_tgsi_action * action,
3125 struct lp_build_tgsi_context * bld_base,
3126 struct lp_build_emit_data * emit_data)
3127 {
3128 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3129
3130 emit_fetch_deriv(bld, emit_data->args[0], NULL, NULL,
3131 &emit_data->output[emit_data->chan]);
3132 }
3133
3134 static void
3135 kill_emit(
3136 const struct lp_build_tgsi_action * action,
3137 struct lp_build_tgsi_context * bld_base,
3138 struct lp_build_emit_data * emit_data)
3139 {
3140 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3141
3142 emit_kill(bld, bld_base->pc - 1);
3143 }
3144
3145 static void
3146 kill_if_emit(
3147 const struct lp_build_tgsi_action * action,
3148 struct lp_build_tgsi_context * bld_base,
3149 struct lp_build_emit_data * emit_data)
3150 {
3151 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3152
3153 emit_kill_if(bld, emit_data->inst, bld_base->pc - 1);
3154 }
3155
3156 static void
3157 tex_emit(
3158 const struct lp_build_tgsi_action * action,
3159 struct lp_build_tgsi_context * bld_base,
3160 struct lp_build_emit_data * emit_data)
3161 {
3162 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3163
3164 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
3165 emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
3166 }
3167
3168 static void
3169 tex2_emit(
3170 const struct lp_build_tgsi_action * action,
3171 struct lp_build_tgsi_context * bld_base,
3172 struct lp_build_emit_data * emit_data)
3173 {
3174 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3175
3176 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
3177 emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
3178 }
3179
3180 static void
3181 txb_emit(
3182 const struct lp_build_tgsi_action * action,
3183 struct lp_build_tgsi_context * bld_base,
3184 struct lp_build_emit_data * emit_data)
3185 {
3186 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3187
3188 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
3189 emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
3190 }
3191
3192 static void
3193 txb2_emit(
3194 const struct lp_build_tgsi_action * action,
3195 struct lp_build_tgsi_context * bld_base,
3196 struct lp_build_emit_data * emit_data)
3197 {
3198 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3199
3200 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
3201 emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
3202 }
3203
3204 static void
3205 txd_emit(
3206 const struct lp_build_tgsi_action * action,
3207 struct lp_build_tgsi_context * bld_base,
3208 struct lp_build_emit_data * emit_data)
3209 {
3210 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3211
3212 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
3213 emit_data->output, 3, LP_SAMPLER_OP_TEXTURE);
3214 }
3215
3216 static void
3217 txl_emit(
3218 const struct lp_build_tgsi_action * action,
3219 struct lp_build_tgsi_context * bld_base,
3220 struct lp_build_emit_data * emit_data)
3221 {
3222 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3223
3224 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
3225 emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
3226 }
3227
3228 static void
3229 txl2_emit(
3230 const struct lp_build_tgsi_action * action,
3231 struct lp_build_tgsi_context * bld_base,
3232 struct lp_build_emit_data * emit_data)
3233 {
3234 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3235
3236 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
3237 emit_data->output, 2, LP_SAMPLER_OP_TEXTURE);
3238 }
3239
3240 static void
3241 txp_emit(
3242 const struct lp_build_tgsi_action * action,
3243 struct lp_build_tgsi_context * bld_base,
3244 struct lp_build_emit_data * emit_data)
3245 {
3246 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3247
3248 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_PROJECTED,
3249 emit_data->output, 1, LP_SAMPLER_OP_TEXTURE);
3250 }
3251
3252 static void
3253 tg4_emit(
3254 const struct lp_build_tgsi_action * action,
3255 struct lp_build_tgsi_context * bld_base,
3256 struct lp_build_emit_data * emit_data)
3257 {
3258 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3259
3260 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
3261 emit_data->output, 2, LP_SAMPLER_OP_GATHER);
3262 }
3263
3264 static void
3265 txq_emit(
3266 const struct lp_build_tgsi_action * action,
3267 struct lp_build_tgsi_context * bld_base,
3268 struct lp_build_emit_data * emit_data)
3269 {
3270 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3271
3272 emit_size_query(bld, emit_data->inst, emit_data->output, FALSE);
3273 }
3274
3275 static void
3276 txf_emit(
3277 const struct lp_build_tgsi_action * action,
3278 struct lp_build_tgsi_context * bld_base,
3279 struct lp_build_emit_data * emit_data)
3280 {
3281 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3282
3283 emit_fetch_texels(bld, emit_data->inst, emit_data->output, FALSE);
3284 }
3285
3286 static void
3287 sample_i_emit(
3288 const struct lp_build_tgsi_action * action,
3289 struct lp_build_tgsi_context * bld_base,
3290 struct lp_build_emit_data * emit_data)
3291 {
3292 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3293
3294 emit_fetch_texels(bld, emit_data->inst, emit_data->output, TRUE);
3295 }
3296
3297 static void
3298 sample_emit(
3299 const struct lp_build_tgsi_action * action,
3300 struct lp_build_tgsi_context * bld_base,
3301 struct lp_build_emit_data * emit_data)
3302 {
3303 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3304
3305 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
3306 FALSE, emit_data->output);
3307 }
3308
3309 static void
3310 sample_b_emit(
3311 const struct lp_build_tgsi_action * action,
3312 struct lp_build_tgsi_context * bld_base,
3313 struct lp_build_emit_data * emit_data)
3314 {
3315 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3316
3317 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
3318 FALSE, emit_data->output);
3319 }
3320
3321 static void
3322 sample_c_emit(
3323 const struct lp_build_tgsi_action * action,
3324 struct lp_build_tgsi_context * bld_base,
3325 struct lp_build_emit_data * emit_data)
3326 {
3327 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3328
3329 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
3330 TRUE, emit_data->output);
3331 }
3332
3333 static void
3334 sample_c_lz_emit(
3335 const struct lp_build_tgsi_action * action,
3336 struct lp_build_tgsi_context * bld_base,
3337 struct lp_build_emit_data * emit_data)
3338 {
3339 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3340
3341 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_ZERO,
3342 TRUE, emit_data->output);
3343 }
3344
3345 static void
3346 sample_d_emit(
3347 const struct lp_build_tgsi_action * action,
3348 struct lp_build_tgsi_context * bld_base,
3349 struct lp_build_emit_data * emit_data)
3350 {
3351 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3352
3353 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
3354 FALSE, emit_data->output);
3355 }
3356
3357 static void
3358 sample_l_emit(
3359 const struct lp_build_tgsi_action * action,
3360 struct lp_build_tgsi_context * bld_base,
3361 struct lp_build_emit_data * emit_data)
3362 {
3363 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3364
3365 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
3366 FALSE, emit_data->output);
3367 }
3368
3369 static void
3370 sviewinfo_emit(
3371 const struct lp_build_tgsi_action * action,
3372 struct lp_build_tgsi_context * bld_base,
3373 struct lp_build_emit_data * emit_data)
3374 {
3375 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3376
3377 emit_size_query(bld, emit_data->inst, emit_data->output, TRUE);
3378 }
3379
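/**
 * Combine the shader's "alive" mask with the current execution mask,
 * if there is one.
 */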
3380 static LLVMValueRef
3381 mask_vec(struct lp_build_tgsi_context *bld_base)
3382 {
3383 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3384 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
3385 struct lp_exec_mask *exec_mask = &bld->exec_mask;
3386
3387 if (!exec_mask->has_mask) {
3388 return lp_build_mask_value(bld->mask);
3389 }
3390 return LLVMBuildAnd(builder, lp_build_mask_value(bld->mask),
3391 exec_mask->exec_mask, "");
3392 }
3393
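/**
 * Increment the active lanes of a counter vector. Active mask lanes are
 * all ones, i.e. -1, so subtracting the mask adds 1 to exactly those lanes.
 */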
3394 static void
3395 increment_vec_ptr_by_mask(struct lp_build_tgsi_context * bld_base,
3396 LLVMValueRef ptr,
3397 LLVMValueRef mask)
3398 {
3399 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3400 LLVMValueRef current_vec = LLVMBuildLoad(builder, ptr, "");
3401
3402 current_vec = LLVMBuildSub(builder, current_vec, mask, "");
3403
3404 LLVMBuildStore(builder, current_vec, ptr);
3405 }
3406
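/**
 * Reset the active lanes of a counter vector to zero, leaving inactive
 * lanes untouched.
 */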
3407 static void
3408 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context * bld_base,
3409 LLVMValueRef ptr,
3410 LLVMValueRef mask)
3411 {
3412 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3413 LLVMValueRef current_vec = LLVMBuildLoad(builder, ptr, "");
3414
3415 current_vec = lp_build_select(&bld_base->uint_bld,
3416 mask,
3417 bld_base->uint_bld.zero,
3418 current_vec);
3419
3420 LLVMBuildStore(builder, current_vec, ptr);
3421 }
3422
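/**
 * Mask off the lanes that have already emitted the maximum number of
 * vertices allowed for this geometry shader.
 */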
3423 static LLVMValueRef
3424 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context * bld,
3425 LLVMValueRef current_mask_vec,
3426 LLVMValueRef total_emitted_vertices_vec)
3427 {
3428 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
3429 struct lp_build_context *int_bld = &bld->bld_base.int_bld;
3430 LLVMValueRef max_mask = lp_build_cmp(int_bld, PIPE_FUNC_LESS,
3431 total_emitted_vertices_vec,
3432 bld->max_output_vertices_vec);
3433
3434 return LLVMBuildAnd(builder, current_mask_vec, max_mask, "");
3435 }
3436
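/**
 * Translate the geometry shader EMIT opcode.
 */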
3437 static void
3438 emit_vertex(
3439 const struct lp_build_tgsi_action * action,
3440 struct lp_build_tgsi_context * bld_base,
3441 struct lp_build_emit_data * emit_data)
3442 {
3443 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3444 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
3445
3446 if (bld->gs_iface->emit_vertex) {
3447 LLVMValueRef mask = mask_vec(bld_base);
3448 LLVMValueRef total_emitted_vertices_vec =
3449 LLVMBuildLoad(builder, bld->total_emitted_vertices_vec_ptr, "");
3450 mask = clamp_mask_to_max_output_vertices(bld, mask,
3451 total_emitted_vertices_vec);
3452 gather_outputs(bld);
3453 bld->gs_iface->emit_vertex(bld->gs_iface, &bld->bld_base,
3454 bld->outputs,
3455 total_emitted_vertices_vec);
3456 increment_vec_ptr_by_mask(bld_base, bld->emitted_vertices_vec_ptr,
3457 mask);
3458 increment_vec_ptr_by_mask(bld_base, bld->total_emitted_vertices_vec_ptr,
3459 mask);
3460 #if DUMP_GS_EMITS
3461 lp_build_print_value(bld->bld_base.base.gallivm,
3462 " +++ emit vertex masked ones = ",
3463 mask);
3464 lp_build_print_value(bld->bld_base.base.gallivm,
3465 " +++ emit vertex emitted = ",
3466 total_emitted_vertices_vec);
3467 #endif
3468 }
3469 }
3470
3471
3472 static void
3473 end_primitive_masked(struct lp_build_tgsi_context * bld_base,
3474 LLVMValueRef mask)
3475 {
3476 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3477 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
3478
3479 if (bld->gs_iface->end_primitive) {
3480 struct lp_build_context *uint_bld = &bld_base->uint_bld;
3481 LLVMValueRef emitted_vertices_vec =
3482 LLVMBuildLoad(builder, bld->emitted_vertices_vec_ptr, "");
3483 LLVMValueRef emitted_prims_vec =
3484 LLVMBuildLoad(builder, bld->emitted_prims_vec_ptr, "");
3485
3486 LLVMValueRef emitted_mask = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
3487 emitted_vertices_vec,
3488 uint_bld->zero);
3489 /* We need to combine the current execution mask with the mask
3490 telling us which, if any, execution slots actually have
3491         unemitted primitives; this way we make sure that end_primitive
3492         executes only on the paths that have unflushed vertices. */
3493 mask = LLVMBuildAnd(builder, mask, emitted_mask, "");
3494
3495 bld->gs_iface->end_primitive(bld->gs_iface, &bld->bld_base,
3496 emitted_vertices_vec,
3497 emitted_prims_vec);
3498
3499 #if DUMP_GS_EMITS
3500 lp_build_print_value(bld->bld_base.base.gallivm,
3501 " +++ end prim masked ones = ",
3502 mask);
3503 lp_build_print_value(bld->bld_base.base.gallivm,
3504 " +++ end prim emitted verts1 = ",
3505 emitted_vertices_vec);
3506 lp_build_print_value(bld->bld_base.base.gallivm,
3507 " +++ end prim emitted prims1 = ",
3508 LLVMBuildLoad(builder,
3509 bld->emitted_prims_vec_ptr, ""));
3510 #endif
3511 increment_vec_ptr_by_mask(bld_base, bld->emitted_prims_vec_ptr,
3512 mask);
3513 clear_uint_vec_ptr_from_mask(bld_base, bld->emitted_vertices_vec_ptr,
3514 mask);
3515 #if DUMP_GS_EMITS
3516 lp_build_print_value(bld->bld_base.base.gallivm,
3517 " +++ end prim emitted verts2 = ",
3518 LLVMBuildLoad(builder,
3519 bld->emitted_vertices_vec_ptr, ""));
3520 #endif
3521 }
3522
3523 }
3524
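/**
 * Translate the geometry shader ENDPRIM opcode.
 */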
3525 static void
3526 end_primitive(
3527 const struct lp_build_tgsi_action * action,
3528 struct lp_build_tgsi_context * bld_base,
3529 struct lp_build_emit_data * emit_data)
3530 {
3531 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3532
3533 if (bld->gs_iface->end_primitive) {
3534 LLVMValueRef mask = mask_vec(bld_base);
3535 end_primitive_masked(bld_base, mask);
3536 }
3537 }
3538
3539 static void
3540 cal_emit(
3541 const struct lp_build_tgsi_action * action,
3542 struct lp_build_tgsi_context * bld_base,
3543 struct lp_build_emit_data * emit_data)
3544 {
3545 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3546
3547 lp_exec_mask_call(&bld->exec_mask, emit_data->inst->Label.Label,
3548 &bld_base->pc);
3549 }
3550
3551 static void
3552 ret_emit(
3553 const struct lp_build_tgsi_action * action,
3554 struct lp_build_tgsi_context * bld_base,
3555 struct lp_build_emit_data * emit_data)
3556 {
3557 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3558
3559 lp_exec_mask_ret(&bld->exec_mask, &bld_base->pc);
3560 }
3561
3562 static void
3563 brk_emit(
3564 const struct lp_build_tgsi_action * action,
3565 struct lp_build_tgsi_context * bld_base,
3566 struct lp_build_emit_data * emit_data)
3567 {
3568 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3569
3570 lp_exec_break(&bld->exec_mask, bld_base);
3571 }
3572
3573 static void
3574 breakc_emit(
3575 const struct lp_build_tgsi_action * action,
3576 struct lp_build_tgsi_context * bld_base,
3577 struct lp_build_emit_data * emit_data)
3578 {
3579 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3580 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3581 struct lp_build_context *uint_bld = &bld_base->uint_bld;
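   /* BREAKC's condition arrives as a float vector; reinterpret the raw
    * bits as unsigned and break on the lanes where they are non-zero. */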
3582 LLVMValueRef unsigned_cond =
3583 LLVMBuildBitCast(builder, emit_data->args[0], uint_bld->vec_type, "");
3584 LLVMValueRef cond = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
3585 unsigned_cond,
3586 uint_bld->zero);
3587
3588 lp_exec_break_condition(&bld->exec_mask, cond);
3589 }
3590
3591 static void
3592 if_emit(
3593 const struct lp_build_tgsi_action * action,
3594 struct lp_build_tgsi_context * bld_base,
3595 struct lp_build_emit_data * emit_data)
3596 {
3597 LLVMValueRef tmp;
3598 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3599
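   /* IF treats its condition as a float and tests it against 0.0; see
    * uif_emit below for the integer variant used by UIF. */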
3600 tmp = lp_build_cmp(&bld_base->base, PIPE_FUNC_NOTEQUAL,
3601 emit_data->args[0], bld->bld_base.base.zero);
3602 lp_exec_mask_cond_push(&bld->exec_mask, tmp);
3603 }
3604
3605 static void
3606 uif_emit(
3607 const struct lp_build_tgsi_action * action,
3608 struct lp_build_tgsi_context * bld_base,
3609 struct lp_build_emit_data * emit_data)
3610 {
3611 LLVMValueRef tmp;
3612 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3613 struct lp_build_context *uint_bld = &bld_base->uint_bld;
3614
3615 tmp = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
3616 emit_data->args[0], uint_bld->zero);
3617 lp_exec_mask_cond_push(&bld->exec_mask, tmp);
3618 }
3619
3620 static void
3621 case_emit(
3622 const struct lp_build_tgsi_action * action,
3623 struct lp_build_tgsi_context * bld_base,
3624 struct lp_build_emit_data * emit_data)
3625 {
3626 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3627
3628 lp_exec_case(&bld->exec_mask, emit_data->args[0]);
3629 }
3630
3631 static void
3632 default_emit(
3633 const struct lp_build_tgsi_action * action,
3634 struct lp_build_tgsi_context * bld_base,
3635 struct lp_build_emit_data * emit_data)
3636 {
3637 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3638
3639 lp_exec_default(&bld->exec_mask, bld_base);
3640 }
3641
3642 static void
3643 switch_emit(
3644 const struct lp_build_tgsi_action * action,
3645 struct lp_build_tgsi_context * bld_base,
3646 struct lp_build_emit_data * emit_data)
3647 {
3648 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3649
3650 lp_exec_switch(&bld->exec_mask, emit_data->args[0]);
3651 }
3652
3653 static void
3654 endswitch_emit(
3655 const struct lp_build_tgsi_action * action,
3656 struct lp_build_tgsi_context * bld_base,
3657 struct lp_build_emit_data * emit_data)
3658 {
3659 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3660
3661 lp_exec_endswitch(&bld->exec_mask, bld_base);
3662 }
3663
3664 static void
3665 bgnloop_emit(
3666 const struct lp_build_tgsi_action * action,
3667 struct lp_build_tgsi_context * bld_base,
3668 struct lp_build_emit_data * emit_data)
3669 {
3670 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3671
3672 lp_exec_bgnloop(&bld->exec_mask);
3673 }
3674
3675 static void
3676 bgnsub_emit(
3677 const struct lp_build_tgsi_action * action,
3678 struct lp_build_tgsi_context * bld_base,
3679 struct lp_build_emit_data * emit_data)
3680 {
3681 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3682
3683 lp_exec_mask_bgnsub(&bld->exec_mask);
3684 }
3685
3686 static void
3687 else_emit(
3688 const struct lp_build_tgsi_action * action,
3689 struct lp_build_tgsi_context * bld_base,
3690 struct lp_build_emit_data * emit_data)
3691 {
3692 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3693
3694 lp_exec_mask_cond_invert(&bld->exec_mask);
3695 }
3696
3697 static void
3698 endif_emit(
3699 const struct lp_build_tgsi_action * action,
3700 struct lp_build_tgsi_context * bld_base,
3701 struct lp_build_emit_data * emit_data)
3702 {
3703 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3704
3705 lp_exec_mask_cond_pop(&bld->exec_mask);
3706 }
3707
3708 static void
3709 endloop_emit(
3710 const struct lp_build_tgsi_action * action,
3711 struct lp_build_tgsi_context * bld_base,
3712 struct lp_build_emit_data * emit_data)
3713 {
3714 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3715
3716 lp_exec_endloop(bld_base->base.gallivm, &bld->exec_mask);
3717 }
3718
3719 static void
3720 endsub_emit(
3721 const struct lp_build_tgsi_action * action,
3722 struct lp_build_tgsi_context * bld_base,
3723 struct lp_build_emit_data * emit_data)
3724 {
3725 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3726
3727 lp_exec_mask_endsub(&bld->exec_mask, &bld_base->pc);
3728 }
3729
3730 static void
3731 cont_emit(
3732 const struct lp_build_tgsi_action * action,
3733 struct lp_build_tgsi_context * bld_base,
3734 struct lp_build_emit_data * emit_data)
3735 {
3736 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3737
3738 lp_exec_continue(&bld->exec_mask);
3739 }
3740
3741 static void emit_prologue(struct lp_build_tgsi_context * bld_base)
3742 {
3743 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3744 struct gallivm_state * gallivm = bld_base->base.gallivm;
3745
3746 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
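      /* file_max is the highest register index used, hence allocate
       * (file_max + 1) * 4 vectors: one per channel of each register. */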
3747 LLVMValueRef array_size =
3748 lp_build_const_int32(gallivm,
3749 bld_base->info->file_max[TGSI_FILE_TEMPORARY] * 4 + 4);
3750 bld->temps_array = lp_build_array_alloca(gallivm,
3751 bld_base->base.vec_type, array_size,
3752 "temp_array");
3753 }
3754
3755 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
3756 LLVMValueRef array_size =
3757 lp_build_const_int32(gallivm,
3758 bld_base->info->file_max[TGSI_FILE_OUTPUT] * 4 + 4);
3759 bld->outputs_array = lp_build_array_alloca(gallivm,
3760 bld_base->base.vec_type, array_size,
3761 "output_array");
3762 }
3763
3764 if (bld->indirect_files & (1 << TGSI_FILE_IMMEDIATE)) {
3765 LLVMValueRef array_size =
3766 lp_build_const_int32(gallivm,
3767 bld_base->info->file_max[TGSI_FILE_IMMEDIATE] * 4 + 4);
3768 bld->imms_array = lp_build_array_alloca(gallivm,
3769 bld_base->base.vec_type, array_size,
3770 "imms_array");
3771 }
3772
3773    /* If we have indirect addressing in inputs, we need to copy them into
3774     * our alloca array to be able to iterate over them. */
3775 if (bld->indirect_files & (1 << TGSI_FILE_INPUT) && !bld->gs_iface) {
3776 unsigned index, chan;
3777 LLVMTypeRef vec_type = bld_base->base.vec_type;
3778 LLVMValueRef array_size = lp_build_const_int32(gallivm,
3779 bld_base->info->file_max[TGSI_FILE_INPUT]*4 + 4);
3780 bld->inputs_array = lp_build_array_alloca(gallivm,
3781 vec_type, array_size,
3782 "input_array");
3783
3784 assert(bld_base->info->num_inputs
3785 <= bld_base->info->file_max[TGSI_FILE_INPUT] + 1);
3786
3787 for (index = 0; index < bld_base->info->num_inputs; ++index) {
3788 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
3789 LLVMValueRef lindex =
3790 lp_build_const_int32(gallivm, index * 4 + chan);
3791 LLVMValueRef input_ptr =
3792 LLVMBuildGEP(gallivm->builder, bld->inputs_array,
3793 &lindex, 1, "");
3794 LLVMValueRef value = bld->inputs[index][chan];
3795 if (value)
3796 LLVMBuildStore(gallivm->builder, value, input_ptr);
3797 }
3798 }
3799 }
3800
3801 if (bld->gs_iface) {
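      /* Geometry shaders track three per-lane counters: primitives
       * emitted, vertices emitted into the current primitive, and
       * vertices emitted in total.  All start at zero. */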
3802 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
3803 bld->emitted_prims_vec_ptr =
3804 lp_build_alloca(gallivm,
3805 uint_bld->vec_type,
3806 "emitted_prims_ptr");
3807 bld->emitted_vertices_vec_ptr =
3808 lp_build_alloca(gallivm,
3809 uint_bld->vec_type,
3810 "emitted_vertices_ptr");
3811 bld->total_emitted_vertices_vec_ptr =
3812 lp_build_alloca(gallivm,
3813 uint_bld->vec_type,
3814 "total_emitted_vertices_ptr");
3815
3816 LLVMBuildStore(gallivm->builder, uint_bld->zero,
3817 bld->emitted_prims_vec_ptr);
3818 LLVMBuildStore(gallivm->builder, uint_bld->zero,
3819 bld->emitted_vertices_vec_ptr);
3820 LLVMBuildStore(gallivm->builder, uint_bld->zero,
3821 bld->total_emitted_vertices_vec_ptr);
3822 }
3823
3824 if (DEBUG_EXECUTION) {
3825 lp_build_printf(gallivm, "\n");
3826 emit_dump_file(bld, TGSI_FILE_CONSTANT);
3827 if (!bld->gs_iface)
3828 emit_dump_file(bld, TGSI_FILE_INPUT);
3829 }
3830 }
3831
3832 static void emit_epilogue(struct lp_build_tgsi_context * bld_base)
3833 {
3834 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
3835 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
3836
3837 if (DEBUG_EXECUTION) {
3838 /* for debugging */
3839 if (0) {
3840 emit_dump_file(bld, TGSI_FILE_TEMPORARY);
3841 }
3842 emit_dump_file(bld, TGSI_FILE_OUTPUT);
3843 lp_build_printf(bld_base->base.gallivm, "\n");
3844 }
3845
3846    /* If we have indirect addressing in outputs, we need to copy our
3847     * alloca array to the output slots specified by the caller. */
3848 if (bld->gs_iface) {
3849 LLVMValueRef total_emitted_vertices_vec;
3850 LLVMValueRef emitted_prims_vec;
3851       /* Implicit end_primitive, needed in case there are any unflushed
3852          vertices in the cache.  Note that we must not call end_primitive
3853          here since the exec_mask is not valid at this point. */
3854 end_primitive_masked(bld_base, lp_build_mask_value(bld->mask));
3855
3856 total_emitted_vertices_vec =
3857 LLVMBuildLoad(builder, bld->total_emitted_vertices_vec_ptr, "");
3858 emitted_prims_vec =
3859 LLVMBuildLoad(builder, bld->emitted_prims_vec_ptr, "");
3860
3861 bld->gs_iface->gs_epilogue(bld->gs_iface,
3862 &bld->bld_base,
3863 total_emitted_vertices_vec,
3864 emitted_prims_vec);
3865 } else {
3866 gather_outputs(bld);
3867 }
3868 }
3869
3870 void
3871 lp_build_tgsi_soa(struct gallivm_state *gallivm,
3872 const struct tgsi_token *tokens,
3873 struct lp_type type,
3874 struct lp_build_mask_context *mask,
3875 LLVMValueRef consts_ptr,
3876 LLVMValueRef const_sizes_ptr,
3877 const struct lp_bld_tgsi_system_values *system_values,
3878 const LLVMValueRef (*inputs)[TGSI_NUM_CHANNELS],
3879 LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS],
3880 LLVMValueRef context_ptr,
3881 LLVMValueRef thread_data_ptr,
3882 struct lp_build_sampler_soa *sampler,
3883 const struct tgsi_shader_info *info,
3884 const struct lp_build_tgsi_gs_iface *gs_iface)
3885 {
3886 struct lp_build_tgsi_soa_context bld;
3887
3888 struct lp_type res_type;
3889
3890 assert(type.length <= LP_MAX_VECTOR_LENGTH);
3891 memset(&res_type, 0, sizeof res_type);
3892 res_type.width = type.width;
3893 res_type.length = type.length;
3894 res_type.sign = 1;
3895
3896 /* Setup build context */
3897 memset(&bld, 0, sizeof bld);
3898 lp_build_context_init(&bld.bld_base.base, gallivm, type);
3899 lp_build_context_init(&bld.bld_base.uint_bld, gallivm, lp_uint_type(type));
3900 lp_build_context_init(&bld.bld_base.int_bld, gallivm, lp_int_type(type));
3901 lp_build_context_init(&bld.elem_bld, gallivm, lp_elem_type(type));
3902 {
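      /* Double-width context: same vector length, but each element is
       * twice as wide as the base type (e.g. 32-bit floats -> doubles). */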
3903 struct lp_type dbl_type;
3904 dbl_type = type;
3905 dbl_type.width *= 2;
3906 lp_build_context_init(&bld.bld_base.dbl_bld, gallivm, dbl_type);
3907 }
3908 bld.mask = mask;
3909 bld.inputs = inputs;
3910 bld.outputs = outputs;
3911 bld.consts_ptr = consts_ptr;
3912 bld.const_sizes_ptr = const_sizes_ptr;
3913 bld.sampler = sampler;
3914 bld.bld_base.info = info;
3915 bld.indirect_files = info->indirect_files;
3916 bld.context_ptr = context_ptr;
3917 bld.thread_data_ptr = thread_data_ptr;
3918
3919    /*
3920     * If the number of temporaries is rather large, then we just
3921     * allocate them as an array right from the start and treat
3922     * them like indirect temporaries.
3923     */
3924 if (info->file_max[TGSI_FILE_TEMPORARY] >= LP_MAX_INLINED_TEMPS) {
3925 bld.indirect_files |= (1 << TGSI_FILE_TEMPORARY);
3926 }
3927    /*
3928     * For performance reasons immediates are always backed by a static
3929     * array, but if their number is too great we have to fall back to
3930     * a dynamically allocated array.
3931     */
3932 bld.use_immediates_array =
3933 (info->file_max[TGSI_FILE_IMMEDIATE] >= LP_MAX_INLINED_IMMEDIATES);
3934 if (bld.use_immediates_array) {
3935 bld.indirect_files |= (1 << TGSI_FILE_IMMEDIATE);
3936 }
3937
3939 bld.bld_base.soa = TRUE;
3940 bld.bld_base.emit_debug = emit_debug;
3941 bld.bld_base.emit_fetch_funcs[TGSI_FILE_CONSTANT] = emit_fetch_constant;
3942 bld.bld_base.emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = emit_fetch_immediate;
3943 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_input;
3944 bld.bld_base.emit_fetch_funcs[TGSI_FILE_TEMPORARY] = emit_fetch_temporary;
3945 bld.bld_base.emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = emit_fetch_system_value;
3946 bld.bld_base.emit_store = emit_store;
3947
3948 bld.bld_base.emit_declaration = lp_emit_declaration_soa;
3949 bld.bld_base.emit_immediate = lp_emit_immediate_soa;
3950
3951 bld.bld_base.emit_prologue = emit_prologue;
3952 bld.bld_base.emit_epilogue = emit_epilogue;
3953
3954 /* Set opcode actions */
3955 lp_set_default_actions_cpu(&bld.bld_base);
3956
3957 bld.bld_base.op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
3958 bld.bld_base.op_actions[TGSI_OPCODE_BGNSUB].emit = bgnsub_emit;
3959 bld.bld_base.op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
3960 bld.bld_base.op_actions[TGSI_OPCODE_BREAKC].emit = breakc_emit;
3961 bld.bld_base.op_actions[TGSI_OPCODE_CAL].emit = cal_emit;
3962 bld.bld_base.op_actions[TGSI_OPCODE_CASE].emit = case_emit;
3963 bld.bld_base.op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
3964 bld.bld_base.op_actions[TGSI_OPCODE_DDX].emit = ddx_emit;
3965 bld.bld_base.op_actions[TGSI_OPCODE_DDY].emit = ddy_emit;
3966 bld.bld_base.op_actions[TGSI_OPCODE_DEFAULT].emit = default_emit;
3967 bld.bld_base.op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
3968 bld.bld_base.op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
3969 bld.bld_base.op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
3970 bld.bld_base.op_actions[TGSI_OPCODE_ENDSUB].emit = endsub_emit;
3971 bld.bld_base.op_actions[TGSI_OPCODE_ENDSWITCH].emit = endswitch_emit;
3972 bld.bld_base.op_actions[TGSI_OPCODE_IF].emit = if_emit;
3973 bld.bld_base.op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
3974 bld.bld_base.op_actions[TGSI_OPCODE_KILL_IF].emit = kill_if_emit;
3975 bld.bld_base.op_actions[TGSI_OPCODE_KILL].emit = kill_emit;
3976 bld.bld_base.op_actions[TGSI_OPCODE_RET].emit = ret_emit;
3977 bld.bld_base.op_actions[TGSI_OPCODE_SWITCH].emit = switch_emit;
3978 bld.bld_base.op_actions[TGSI_OPCODE_TEX].emit = tex_emit;
3979 bld.bld_base.op_actions[TGSI_OPCODE_TXB].emit = txb_emit;
3980 bld.bld_base.op_actions[TGSI_OPCODE_TXD].emit = txd_emit;
3981 bld.bld_base.op_actions[TGSI_OPCODE_TXL].emit = txl_emit;
3982 bld.bld_base.op_actions[TGSI_OPCODE_TXP].emit = txp_emit;
3983 bld.bld_base.op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
3984 bld.bld_base.op_actions[TGSI_OPCODE_TXF].emit = txf_emit;
3985 bld.bld_base.op_actions[TGSI_OPCODE_TEX2].emit = tex2_emit;
3986 bld.bld_base.op_actions[TGSI_OPCODE_TXB2].emit = txb2_emit;
3987 bld.bld_base.op_actions[TGSI_OPCODE_TXL2].emit = txl2_emit;
3988 bld.bld_base.op_actions[TGSI_OPCODE_TG4].emit = tg4_emit;
3989 /* DX10 sampling ops */
3990 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE].emit = sample_emit;
3991 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_B].emit = sample_b_emit;
3992 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C].emit = sample_c_emit;
3993 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C_LZ].emit = sample_c_lz_emit;
3994 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_D].emit = sample_d_emit;
3995 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_I].emit = sample_i_emit;
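   /* SAMPLE_I_MS is deliberately wired to the plain SAMPLE_I handler;
    * it does not perform a true multisampled fetch. */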
3996 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_I_MS].emit = sample_i_emit;
3997 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_L].emit = sample_l_emit;
3998 bld.bld_base.op_actions[TGSI_OPCODE_SVIEWINFO].emit = sviewinfo_emit;
3999
4000 if (gs_iface) {
4001       /* There's no specific default for this because it should always
4002        * be set, but apps using ext_geometry_shader4 quite often
4003        * forgot to set it, so we fall back to MAX_VERTEX_VARYING from
4004        * that spec.  We could debug_assert if it's not set instead,
4005        * but that's a lot uglier. */
4006 uint max_output_vertices;
4007
4008 /* inputs are always indirect with gs */
4009 bld.indirect_files |= (1 << TGSI_FILE_INPUT);
4010 bld.gs_iface = gs_iface;
4011 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_gs_input;
4012 bld.bld_base.op_actions[TGSI_OPCODE_EMIT].emit = emit_vertex;
4013 bld.bld_base.op_actions[TGSI_OPCODE_ENDPRIM].emit = end_primitive;
4014
4015 max_output_vertices =
4016 info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
4017 if (!max_output_vertices)
4018 max_output_vertices = 32;
4019
4020 bld.max_output_vertices_vec =
4021 lp_build_const_int_vec(gallivm, bld.bld_base.int_bld.type,
4022 max_output_vertices);
4023 }
4024
4025 lp_exec_mask_init(&bld.exec_mask, &bld.bld_base.int_bld);
4026
4027 bld.system_values = *system_values;
4028
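   /* Walk the token stream; this invokes emit_prologue first, then the
    * per-opcode actions set up above, and finally emit_epilogue. */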
4029 lp_build_tgsi_llvm(&bld.bld_base, tokens);
4030
4031 if (0) {
4032 LLVMBasicBlockRef block = LLVMGetInsertBlock(gallivm->builder);
4033 LLVMValueRef function = LLVMGetBasicBlockParent(block);
4034 debug_printf("11111111111111111111111111111 \n");
4035 tgsi_dump(tokens, 0);
4036 lp_debug_dump_value(function);
4037 debug_printf("2222222222222222222222222222 \n");
4038 }
4039
4040 if (0) {
4041 LLVMModuleRef module = LLVMGetGlobalParent(
4042 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm->builder)));
4043       LLVMDumpModule(module);
4045    }
4046 lp_exec_mask_fini(&bld.exec_mask);
4047 }