gallivm: use uint build context for mask instead of float
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_tgsi_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_tgsi_action.h"
51 #include "lp_bld_type.h"
52 #include "lp_bld_const.h"
53 #include "lp_bld_arit.h"
54 #include "lp_bld_bitarit.h"
55 #include "lp_bld_gather.h"
56 #include "lp_bld_init.h"
57 #include "lp_bld_logic.h"
58 #include "lp_bld_swizzle.h"
59 #include "lp_bld_flow.h"
60 #include "lp_bld_quad.h"
61 #include "lp_bld_tgsi.h"
62 #include "lp_bld_limits.h"
63 #include "lp_bld_debug.h"
64 #include "lp_bld_printf.h"
65 #include "lp_bld_sample.h"
66 #include "lp_bld_struct.h"
67
68 #define DUMP_GS_EMITS 0
69
70 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
71 {
72 LLVMTypeRef int_type = LLVMInt32TypeInContext(bld->gallivm->context);
73 LLVMBuilderRef builder = bld->gallivm->builder;
74
75 mask->bld = bld;
76 mask->has_mask = FALSE;
77 mask->ret_in_main = FALSE;
78 mask->cond_stack_size = 0;
79 mask->loop_stack_size = 0;
80 mask->call_stack_size = 0;
81
82 mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
83 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
84 LLVMConstAllOnes(mask->int_vec_type);
85
86 mask->loop_limiter = lp_build_alloca(bld->gallivm, int_type, "looplimiter");
87
88 LLVMBuildStore(
89 builder,
90 LLVMConstInt(int_type, LP_MAX_TGSI_LOOP_ITERATIONS, false),
91 mask->loop_limiter);
92 }
93
94 static void lp_exec_mask_update(struct lp_exec_mask *mask)
95 {
96 LLVMBuilderRef builder = mask->bld->gallivm->builder;
97
98 if (mask->loop_stack_size) {
99 /*for loops we need to update the entire mask at runtime */
100 LLVMValueRef tmp;
101 assert(mask->break_mask);
102 tmp = LLVMBuildAnd(builder,
103 mask->cont_mask,
104 mask->break_mask,
105 "maskcb");
106 mask->exec_mask = LLVMBuildAnd(builder,
107 mask->cond_mask,
108 tmp,
109 "maskfull");
110 } else
111 mask->exec_mask = mask->cond_mask;
112
113 if (mask->call_stack_size || mask->ret_in_main) {
114 mask->exec_mask = LLVMBuildAnd(builder,
115 mask->exec_mask,
116 mask->ret_mask,
117 "callmask");
118 }
119
120 mask->has_mask = (mask->cond_stack_size > 0 ||
121 mask->loop_stack_size > 0 ||
122 mask->call_stack_size > 0 ||
123 mask->ret_in_main);
124 }
125
126 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
127 LLVMValueRef val)
128 {
129 LLVMBuilderRef builder = mask->bld->gallivm->builder;
130
131 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
132 if (mask->cond_stack_size == 0) {
133 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
134 }
135 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
136 assert(LLVMTypeOf(val) == mask->int_vec_type);
137 mask->cond_mask = LLVMBuildAnd(builder,
138 mask->cond_mask,
139 val,
140 "");
141 lp_exec_mask_update(mask);
142 }
143
144 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
145 {
146 LLVMBuilderRef builder = mask->bld->gallivm->builder;
147 LLVMValueRef prev_mask;
148 LLVMValueRef inv_mask;
149
150 assert(mask->cond_stack_size);
151 prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
152 if (mask->cond_stack_size == 1) {
153 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
154 }
155
156 inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
157
158 mask->cond_mask = LLVMBuildAnd(builder,
159 inv_mask,
160 prev_mask, "");
161 lp_exec_mask_update(mask);
162 }
163
164 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
165 {
166 assert(mask->cond_stack_size);
167 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
168 lp_exec_mask_update(mask);
169 }
170
/* Begin a TGSI loop: push the enclosing loop's state, spill break_mask to
 * an alloca so it survives iterations, and open the loop's basic block.
 */
static void lp_exec_bgnloop(struct lp_exec_mask *mask)
{
   LLVMBuilderRef builder = mask->bld->gallivm->builder;

   /* Outside any loop all loop-related state must be pristine. */
   if (mask->loop_stack_size == 0) {
      assert(mask->loop_block == NULL);
      assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_var == NULL);
   }

   assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);

   /* Push the enclosing loop's state so lp_exec_endloop() can restore it. */
   mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
   mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
   mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
   mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
   ++mask->loop_stack_size;

   /* break_mask must be carried across iterations, so keep it in memory */
   mask->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
   LLVMBuildStore(builder, mask->break_mask, mask->break_var);

   /* Open a new basic block for the loop body and branch into it. */
   mask->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");

   LLVMBuildBr(builder, mask->loop_block);
   LLVMPositionBuilderAtEnd(builder, mask->loop_block);

   /* Reload the break mask at the top of each iteration. */
   mask->break_mask = LLVMBuildLoad(builder, mask->break_var, "");

   lp_exec_mask_update(mask);
}
202
203 static void lp_exec_break(struct lp_exec_mask *mask)
204 {
205 LLVMBuilderRef builder = mask->bld->gallivm->builder;
206 LLVMValueRef exec_mask = LLVMBuildNot(builder,
207 mask->exec_mask,
208 "break");
209
210 mask->break_mask = LLVMBuildAnd(builder,
211 mask->break_mask,
212 exec_mask, "break_full");
213
214 lp_exec_mask_update(mask);
215 }
216
217 static void lp_exec_break_condition(struct lp_exec_mask *mask,
218 LLVMValueRef cond)
219 {
220 LLVMBuilderRef builder = mask->bld->gallivm->builder;
221 LLVMValueRef cond_mask = LLVMBuildAnd(builder,
222 mask->exec_mask,
223 cond, "cond_mask");
224 cond_mask = LLVMBuildNot(builder, cond_mask, "break_cond");
225
226 mask->break_mask = LLVMBuildAnd(builder,
227 mask->break_mask,
228 cond_mask, "breakc_full");
229
230 lp_exec_mask_update(mask);
231 }
232
233 static void lp_exec_continue(struct lp_exec_mask *mask)
234 {
235 LLVMBuilderRef builder = mask->bld->gallivm->builder;
236 LLVMValueRef exec_mask = LLVMBuildNot(builder,
237 mask->exec_mask,
238 "");
239
240 mask->cont_mask = LLVMBuildAnd(builder,
241 mask->cont_mask,
242 exec_mask, "");
243
244 lp_exec_mask_update(mask);
245 }
246
247
/* Emit the end of a TGSI loop: loop back while any channel is still
 * executing and the iteration limiter has not been exhausted, then pop
 * the enclosing loop's state.
 */
static void lp_exec_endloop(struct gallivm_state *gallivm,
                            struct lp_exec_mask *mask)
{
   LLVMBuilderRef builder = mask->bld->gallivm->builder;
   LLVMBasicBlockRef endloop;
   LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
   /* one scalar integer wide enough to hold the whole exec mask vector */
   LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
                                               mask->bld->type.width *
                                               mask->bld->type.length);
   LLVMValueRef i1cond, i2cond, icond, limiter;

   assert(mask->break_mask);

   /*
    * Restore the cont_mask, but don't pop
    */
   assert(mask->loop_stack_size);
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
   lp_exec_mask_update(mask);

   /*
    * Unlike the continue mask, the break_mask must be preserved across loop
    * iterations
    */
   LLVMBuildStore(builder, mask->break_mask, mask->break_var);

   /* Decrement the loop limiter */
   limiter = LLVMBuildLoad(builder, mask->loop_limiter, "");

   limiter = LLVMBuildSub(
      builder,
      limiter,
      LLVMConstInt(int_type, 1, false),
      "");

   LLVMBuildStore(builder, limiter, mask->loop_limiter);

   /* i1cond = (mask != 0) -- true while any channel is still executing */
   i1cond = LLVMBuildICmp(
      builder,
      LLVMIntNE,
      LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
      LLVMConstNull(reg_type), "i1cond");

   /* i2cond = (looplimiter > 0) -- safety net against runaway loops */
   i2cond = LLVMBuildICmp(
      builder,
      LLVMIntSGT,
      limiter,
      LLVMConstNull(int_type), "i2cond");

   /* if( i1cond && i2cond ) */
   icond = LLVMBuildAnd(builder, i1cond, i2cond, "");

   endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");

   /* back-edge to the loop header, or fall through to endloop */
   LLVMBuildCondBr(builder,
                   icond, mask->loop_block, endloop);

   LLVMPositionBuilderAtEnd(builder, endloop);

   assert(mask->loop_stack_size);
   --mask->loop_stack_size;
   /* Pop the enclosing loop's state saved by lp_exec_bgnloop(). */
   mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
   mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
   mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;

   lp_exec_mask_update(mask);
}
318
319 /* stores val into an address pointed to by dst.
320 * mask->exec_mask is used to figure out which bits of val
321 * should be stored into the address
322 * (0 means don't store this bit, 1 means do store).
323 */
324 static void lp_exec_mask_store(struct lp_exec_mask *mask,
325 struct lp_build_context *bld_store,
326 LLVMValueRef pred,
327 LLVMValueRef val,
328 LLVMValueRef dst)
329 {
330 LLVMBuilderRef builder = mask->bld->gallivm->builder;
331
332 /* Mix the predicate and execution mask */
333 if (mask->has_mask) {
334 if (pred) {
335 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
336 } else {
337 pred = mask->exec_mask;
338 }
339 }
340
341 if (pred) {
342 LLVMValueRef real_val, dst_val;
343
344 dst_val = LLVMBuildLoad(builder, dst, "");
345 real_val = lp_build_select(bld_store,
346 pred,
347 val, dst_val);
348
349 LLVMBuildStore(builder, real_val, dst);
350 } else
351 LLVMBuildStore(builder, val, dst);
352 }
353
354 static void lp_exec_mask_call(struct lp_exec_mask *mask,
355 int func,
356 int *pc)
357 {
358 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
359 mask->call_stack[mask->call_stack_size].pc = *pc;
360 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
361 mask->call_stack_size++;
362 *pc = func;
363 }
364
365 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
366 {
367 LLVMBuilderRef builder = mask->bld->gallivm->builder;
368 LLVMValueRef exec_mask;
369
370 if (mask->cond_stack_size == 0 &&
371 mask->loop_stack_size == 0 &&
372 mask->call_stack_size == 0) {
373 /* returning from main() */
374 *pc = -1;
375 return;
376 }
377
378 if (mask->call_stack_size == 0) {
379 /*
380 * This requires special handling since we need to ensure
381 * we don't drop the mask even if we have no call stack
382 * (e.g. after a ret in a if clause after the endif)
383 */
384 mask->ret_in_main = TRUE;
385 }
386
387 exec_mask = LLVMBuildNot(builder,
388 mask->exec_mask,
389 "ret");
390
391 mask->ret_mask = LLVMBuildAnd(builder,
392 mask->ret_mask,
393 exec_mask, "ret_full");
394
395 lp_exec_mask_update(mask);
396 }
397
/* Subroutine entry point: intentionally empty -- all mask bookkeeping is
 * done in lp_exec_mask_call() and lp_exec_mask_endsub().
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
401
402 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
403 {
404 assert(mask->call_stack_size);
405 mask->call_stack_size--;
406 *pc = mask->call_stack[mask->call_stack_size].pc;
407 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
408 lp_exec_mask_update(mask);
409 }
410
411
412 /**
413 * Return pointer to a temporary register channel (src or dest).
414 * Note that indirect addressing cannot be handled here.
415 * \param index which temporary register
416 * \param chan which channel of the temp register.
417 */
418 LLVMValueRef
419 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context *bld,
420 unsigned index,
421 unsigned chan)
422 {
423 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
424 assert(chan < 4);
425 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
426 LLVMValueRef lindex = lp_build_const_int32(bld->bld_base.base.gallivm, index * 4 + chan);
427 return LLVMBuildGEP(builder, bld->temps_array, &lindex, 1, "");
428 }
429 else {
430 return bld->temps[index][chan];
431 }
432 }
433
434 /**
435 * Return pointer to a output register channel (src or dest).
436 * Note that indirect addressing cannot be handled here.
437 * \param index which output register
438 * \param chan which channel of the output register.
439 */
440 LLVMValueRef
441 lp_get_output_ptr(struct lp_build_tgsi_soa_context *bld,
442 unsigned index,
443 unsigned chan)
444 {
445 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
446 assert(chan < 4);
447 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
448 LLVMValueRef lindex = lp_build_const_int32(bld->bld_base.base.gallivm,
449 index * 4 + chan);
450 return LLVMBuildGEP(builder, bld->outputs_array, &lindex, 1, "");
451 }
452 else {
453 return bld->outputs[index][chan];
454 }
455 }
456
457 /*
458 * If we have indirect addressing in outputs copy our alloca array
459 * to the outputs slots specified by the caller to make sure
460 * our outputs are delivered consistently via the same interface.
461 */
462 static void
463 gather_outputs(struct lp_build_tgsi_soa_context * bld)
464 {
465 if ((bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
466 unsigned index, chan;
467 assert(bld->bld_base.info->num_outputs <=
468 bld->bld_base.info->file_max[TGSI_FILE_OUTPUT] + 1);
469 for (index = 0; index < bld->bld_base.info->num_outputs; ++index) {
470 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
471 bld->outputs[index][chan] = lp_get_output_ptr(bld, index, chan);
472 }
473 }
474 }
475 }
476
477 /**
478 * Gather vector.
479 * XXX the lp_build_gather() function should be capable of doing this
480 * with a little work.
481 */
482 static LLVMValueRef
483 build_gather(struct lp_build_context *bld,
484 LLVMValueRef base_ptr,
485 LLVMValueRef indexes)
486 {
487 LLVMBuilderRef builder = bld->gallivm->builder;
488 LLVMValueRef res = bld->undef;
489 unsigned i;
490
491 /*
492 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
493 */
494 for (i = 0; i < bld->type.length; i++) {
495 LLVMValueRef ii = lp_build_const_int32(bld->gallivm, i);
496 LLVMValueRef index = LLVMBuildExtractElement(builder,
497 indexes, ii, "");
498 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr,
499 &index, 1, "gather_ptr");
500 LLVMValueRef scalar = LLVMBuildLoad(builder, scalar_ptr, "");
501
502 res = LLVMBuildInsertElement(builder, res, scalar, ii, "");
503 }
504
505 return res;
506 }
507
508
509 /**
510 * Scatter/store vector.
511 */
static void
emit_mask_scatter(struct lp_build_tgsi_soa_context *bld,
                  LLVMValueRef base_ptr,
                  LLVMValueRef indexes,
                  LLVMValueRef values,
                  struct lp_exec_mask *mask,
                  LLVMValueRef pred)
{
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   unsigned i;

   /* Mix the predicate and execution mask */
   if (mask->has_mask) {
      if (pred) {
         pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
      }
      else {
         pred = mask->exec_mask;
      }
   }

   /*
    * Loop over elements of index_vec, store scalar value.
    */
   for (i = 0; i < bld->bld_base.base.type.length; i++) {
      LLVMValueRef ii = lp_build_const_int32(gallivm, i);
      LLVMValueRef index = LLVMBuildExtractElement(builder, indexes, ii, "");
      LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr, &index, 1, "scatter_ptr");
      LLVMValueRef val = LLVMBuildExtractElement(builder, values, ii, "scatter_val");
      /* per-channel predicate bit, or NULL if there is no predicate */
      LLVMValueRef scalar_pred = pred ?
         LLVMBuildExtractElement(builder, pred, ii, "scatter_pred") : NULL;

      /* debug aid, disabled by default */
      if (0)
         lp_build_printf(gallivm, "scatter %d: val %f at %d %p\n",
                         ii, val, index, scalar_ptr);

      if (scalar_pred) {
         /* masked store: load-select-store so disabled channels keep the
          * previous memory contents */
         LLVMValueRef real_val, dst_val;
         dst_val = LLVMBuildLoad(builder, scalar_ptr, "");
         real_val = lp_build_select(&bld->elem_bld, scalar_pred, val, dst_val);
         LLVMBuildStore(builder, real_val, scalar_ptr);
      }
      else {
         LLVMBuildStore(builder, val, scalar_ptr);
      }
   }
}
560
561
562 /**
563 * Read the current value of the ADDR register, convert the floats to
564 * ints, add the base index and return the vector of offsets.
565 * The offsets will be used to index into the constant buffer or
566 * temporary register file.
567 */
static LLVMValueRef
get_indirect_index(struct lp_build_tgsi_soa_context *bld,
                   unsigned reg_file, unsigned reg_index,
                   const struct tgsi_ind_register *indirect_reg)
{
   LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
   struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
   /* always use X component of address register */
   unsigned swizzle = indirect_reg->Swizzle;
   LLVMValueRef base;
   LLVMValueRef rel;
   LLVMValueRef max_index;
   LLVMValueRef index;

   assert(bld->indirect_files & (1 << reg_file));

   /* broadcast the constant base register index to all channels */
   base = lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, reg_index);

   assert(swizzle < 4);
   switch (indirect_reg->File) {
   case TGSI_FILE_ADDRESS:
      rel = LLVMBuildLoad(builder,
                          bld->addr[indirect_reg->Index][swizzle],
                          "load addr reg");
      /* ADDR LLVM values already have LLVM integer type. */
      break;
   case TGSI_FILE_TEMPORARY:
      rel = lp_get_temp_ptr_soa(bld, indirect_reg->Index, swizzle);
      rel = LLVMBuildLoad(builder, rel, "load temp reg");
      /* TEMP LLVM values always have LLVM float type, but for indirection, the
       * value actually stored is expected to be an integer */
      rel = LLVMBuildBitCast(builder, rel, uint_bld->vec_type, "");
      break;
   default:
      assert(0);
      rel = uint_bld->zero;
   }

   /* index = base + relative offset, per channel */
   index = lp_build_add(uint_bld, base, rel);

   /* clamp so out-of-range indirection cannot access outside the file */
   max_index = lp_build_const_int_vec(bld->bld_base.base.gallivm,
                                      uint_bld->type,
                                      bld->bld_base.info->file_max[reg_file]);

   assert(!uint_bld->type.sign);
   index = lp_build_min(uint_bld, index, max_index);

   return index;
}
617
618 static struct lp_build_context *
619 stype_to_fetch(struct lp_build_tgsi_context * bld_base,
620 enum tgsi_opcode_type stype)
621 {
622 struct lp_build_context *bld_fetch;
623
624 switch (stype) {
625 case TGSI_TYPE_FLOAT:
626 case TGSI_TYPE_UNTYPED:
627 bld_fetch = &bld_base->base;
628 break;
629 case TGSI_TYPE_UNSIGNED:
630 bld_fetch = &bld_base->uint_bld;
631 break;
632 case TGSI_TYPE_SIGNED:
633 bld_fetch = &bld_base->int_bld;
634 break;
635 case TGSI_TYPE_VOID:
636 case TGSI_TYPE_DOUBLE:
637 default:
638 assert(0);
639 bld_fetch = NULL;
640 break;
641 }
642 return bld_fetch;
643 }
644
/* Fetch one channel of a constant buffer register, handling 2D constant
 * buffer declarations and indirect addressing, then bitcast the result to
 * the requested type.
 */
static LLVMValueRef
emit_fetch_constant(
   struct lp_build_tgsi_context * bld_base,
   const struct tgsi_full_src_register * reg,
   enum tgsi_opcode_type stype,
   unsigned swizzle)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *uint_bld = &bld_base->uint_bld;
   LLVMValueRef indirect_index = NULL;
   unsigned dimension = 0;
   LLVMValueRef dimension_index;
   LLVMValueRef consts_ptr;
   LLVMValueRef res;

   /* XXX: Handle fetching xyzw components as a vector */
   assert(swizzle != ~0);

   /* 2D declaration: select which constant buffer to read */
   if (reg->Register.Dimension) {
      assert(!reg->Dimension.Indirect);
      dimension = reg->Dimension.Index;
      assert(dimension < LP_MAX_TGSI_CONST_BUFFERS);
   }

   dimension_index = lp_build_const_int32(gallivm, dimension);
   consts_ptr = lp_build_array_get(gallivm, bld->consts_ptr, dimension_index);

   if (reg->Register.Indirect) {
      indirect_index = get_indirect_index(bld,
                                          reg->Register.File,
                                          reg->Register.Index,
                                          &reg->Indirect);
   }

   if (reg->Register.Indirect) {
      LLVMValueRef swizzle_vec =
         lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, swizzle);
      LLVMValueRef index_vec;  /* index into the const buffer */

      /* index_vec = indirect_index * 4 + swizzle */
      index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
      index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);

      /* Gather values from the constant buffer */
      res = build_gather(&bld_base->base, consts_ptr, index_vec);
   }
   else {
      LLVMValueRef index;  /* index into the const buffer */
      LLVMValueRef scalar, scalar_ptr;

      index = lp_build_const_int32(gallivm, reg->Register.Index*4 + swizzle);

      /* direct access: load one scalar and broadcast it to all channels */
      scalar_ptr = LLVMBuildGEP(builder, consts_ptr,
                                &index, 1, "");
      scalar = LLVMBuildLoad(builder, scalar_ptr, "");
      res = lp_build_broadcast_scalar(&bld_base->base, scalar);
   }

   /* constants are loaded as floats; reinterpret the bits for int fetches */
   if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED) {
      struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
      res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
   }
   return res;
}
711
712 static LLVMValueRef
713 emit_fetch_immediate(
714 struct lp_build_tgsi_context * bld_base,
715 const struct tgsi_full_src_register * reg,
716 enum tgsi_opcode_type stype,
717 unsigned swizzle)
718 {
719 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
720 LLVMValueRef res = bld->immediates[reg->Register.Index][swizzle];
721 assert(res);
722
723 if (stype == TGSI_TYPE_UNSIGNED) {
724 res = LLVMConstBitCast(res, bld_base->uint_bld.vec_type);
725 } else if (stype == TGSI_TYPE_SIGNED) {
726 res = LLVMConstBitCast(res, bld_base->int_bld.vec_type);
727 }
728 return res;
729 }
730
/* Fetch one channel of an input register, handling indirect addressing via
 * a gather from the inputs alloca array.
 */
static LLVMValueRef
emit_fetch_input(
   struct lp_build_tgsi_context * bld_base,
   const struct tgsi_full_src_register * reg,
   enum tgsi_opcode_type stype,
   unsigned swizzle)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *uint_bld = &bld_base->uint_bld;
   LLVMValueRef indirect_index = NULL;
   LLVMValueRef res;

   if (reg->Register.Indirect) {
      indirect_index = get_indirect_index(bld,
                                          reg->Register.File,
                                          reg->Register.Index,
                                          &reg->Indirect);
   }

   if (reg->Register.Indirect) {
      LLVMValueRef swizzle_vec =
         lp_build_const_int_vec(gallivm, uint_bld->type, swizzle);
      LLVMValueRef length_vec =
         lp_build_const_int_vec(gallivm, uint_bld->type, bld->bld_base.base.type.length);
      LLVMValueRef index_vec;  /* index into the const buffer */
      LLVMValueRef inputs_array;
      LLVMTypeRef float4_ptr_type;

      /* index_vec = (indirect_index * 4 + swizzle) * length */
      index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
      index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
      index_vec = lp_build_mul(uint_bld, index_vec, length_vec);

      /* cast inputs_array pointer to float* */
      float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
      inputs_array = LLVMBuildBitCast(builder, bld->inputs_array,
                                      float4_ptr_type, "");

      /* Gather values from the temporary register array */
      res = build_gather(&bld_base->base, inputs_array, index_vec);
   } else {
      if (bld->indirect_files & (1 << TGSI_FILE_INPUT)) {
         /* inputs were spilled to an alloca array; load the channel */
         LLVMValueRef lindex = lp_build_const_int32(gallivm,
                                                    reg->Register.Index * 4 + swizzle);
         LLVMValueRef input_ptr = LLVMBuildGEP(builder,
                                               bld->inputs_array, &lindex, 1, "");
         res = LLVMBuildLoad(builder, input_ptr, "");
      }
      else {
         /* common case: input is an SSA value */
         res = bld->inputs[reg->Register.Index][swizzle];
      }
   }

   assert(res);

   /* inputs are stored as floats; reinterpret the bits for int fetches */
   if (stype == TGSI_TYPE_UNSIGNED) {
      res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
   } else if (stype == TGSI_TYPE_SIGNED) {
      res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
   }

   return res;
}
796
797
/* Fetch one channel of a geometry shader input; GS inputs are addressed by
 * (vertex, attribute) and read through the gs_iface callback.
 */
static LLVMValueRef
emit_fetch_gs_input(
   struct lp_build_tgsi_context * bld_base,
   const struct tgsi_full_src_register * reg,
   enum tgsi_opcode_type stype,
   unsigned swizzle)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef attrib_index = NULL;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef swizzle_index = lp_build_const_int32(gallivm, swizzle);
   LLVMValueRef res;

   /* resolve the attribute index (possibly indirect) */
   if (reg->Register.Indirect) {
      attrib_index = get_indirect_index(bld,
                                        reg->Register.File,
                                        reg->Register.Index,
                                        &reg->Indirect);
   } else {
      attrib_index = lp_build_const_int32(gallivm, reg->Register.Index);
   }

   /* resolve the vertex index (possibly indirect) */
   if (reg->Dimension.Indirect) {
      vertex_index = get_indirect_index(bld,
                                        reg->Register.File,
                                        reg->Dimension.Index,
                                        &reg->DimIndirect);
   } else {
      vertex_index = lp_build_const_int32(gallivm, reg->Dimension.Index);
   }

   /* delegate the actual fetch to the geometry shader interface */
   res = bld->gs_iface->fetch_input(bld->gs_iface, bld_base,
                                    reg->Dimension.Indirect,
                                    vertex_index, attrib_index,
                                    swizzle_index);

   assert(res);

   /* reinterpret the bits for int fetches */
   if (stype == TGSI_TYPE_UNSIGNED) {
      res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
   } else if (stype == TGSI_TYPE_SIGNED) {
      res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
   }

   return res;
}
846
/* Fetch one channel of a temporary register, handling indirect addressing
 * via a gather from the temps alloca array.
 */
static LLVMValueRef
emit_fetch_temporary(
   struct lp_build_tgsi_context * bld_base,
   const struct tgsi_full_src_register * reg,
   enum tgsi_opcode_type stype,
   unsigned swizzle)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *uint_bld = &bld_base->uint_bld;
   LLVMValueRef indirect_index = NULL;
   LLVMValueRef res;

   if (reg->Register.Indirect) {
      indirect_index = get_indirect_index(bld,
                                          reg->Register.File,
                                          reg->Register.Index,
                                          &reg->Indirect);
   }

   if (reg->Register.Indirect) {
      LLVMValueRef swizzle_vec =
         lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, swizzle);
      LLVMValueRef length_vec =
         lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type,
                                bld->bld_base.base.type.length);
      LLVMValueRef index_vec;  /* index into the const buffer */
      LLVMValueRef temps_array;
      LLVMTypeRef float4_ptr_type;

      /* index_vec = (indirect_index * 4 + swizzle) * length */
      index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
      index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
      index_vec = lp_build_mul(uint_bld, index_vec, length_vec);

      /* cast temps_array pointer to float* */
      float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(bld->bld_base.base.gallivm->context), 0);
      temps_array = LLVMBuildBitCast(builder, bld->temps_array,
                                     float4_ptr_type, "");

      /* Gather values from the temporary register array */
      res = build_gather(&bld_base->base, temps_array, index_vec);
   }
   else {
      /* direct access: load the channel straight from its alloca */
      LLVMValueRef temp_ptr;
      temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle);
      res = LLVMBuildLoad(builder, temp_ptr, "");
   }

   /* temps are stored as floats; reinterpret the bits for int fetches */
   if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED) {
      struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
      res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
   }

   return res;
}
904
/* Fetch a system value register (instance id, vertex id, prim id),
 * bitcasting to the requested type when it differs from the value's
 * natural type.
 */
static LLVMValueRef
emit_fetch_system_value(
   struct lp_build_tgsi_context * bld_base,
   const struct tgsi_full_src_register * reg,
   enum tgsi_opcode_type stype,
   unsigned swizzle)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   const struct tgsi_shader_info *info = bld->bld_base.info;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef res;
   enum tgsi_opcode_type atype; // Actual type of the value

   assert(!reg->Register.Indirect);

   switch (info->system_value_semantic_name[reg->Register.Index]) {
   case TGSI_SEMANTIC_INSTANCEID:
      /* scalar instance id broadcast to every channel */
      res = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.instance_id);
      atype = TGSI_TYPE_UNSIGNED;
      break;

   case TGSI_SEMANTIC_VERTEXID:
      res = bld->system_values.vertex_id;
      atype = TGSI_TYPE_UNSIGNED;
      break;

   case TGSI_SEMANTIC_PRIMID:
      res = bld->system_values.prim_id;
      atype = TGSI_TYPE_UNSIGNED;
      break;

   default:
      assert(!"unexpected semantic in emit_fetch_system_value");
      res = bld_base->base.zero;
      atype = TGSI_TYPE_FLOAT;
      break;
   }

   /* bitcast only when the requested type differs from the actual type */
   if (atype != stype) {
      if (stype == TGSI_TYPE_FLOAT) {
         res = LLVMBuildBitCast(builder, res, bld_base->base.vec_type, "");
      } else if (stype == TGSI_TYPE_UNSIGNED) {
         res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
      } else if (stype == TGSI_TYPE_SIGNED) {
         res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
      }
   }

   return res;
}
956
957 /**
958 * Register fetch with derivatives.
959 */
960 static void
961 emit_fetch_deriv(
962 struct lp_build_tgsi_soa_context *bld,
963 LLVMValueRef src,
964 LLVMValueRef *res,
965 LLVMValueRef *ddx,
966 LLVMValueRef *ddy)
967 {
968 if(res)
969 *res = src;
970
971 /* TODO: use interpolation coeffs for inputs */
972
973 if(ddx)
974 *ddx = lp_build_ddx(&bld->bld_base.base, src);
975
976 if(ddy)
977 *ddy = lp_build_ddy(&bld->bld_base.base, src);
978 }
979
980
981 /**
982 * Predicate.
983 */
/* Build the per-channel predicate masks for a predicated instruction.
 * pred[chan] is NULL when the instruction is not predicated.
 */
static void
emit_fetch_predicate(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   LLVMValueRef *pred)
{
   LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
   unsigned index;
   unsigned char swizzles[4];
   LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
   LLVMValueRef value;
   unsigned chan;

   /* Unpredicated instruction: no mask for any channel. */
   if (!inst->Instruction.Predicate) {
      TGSI_FOR_EACH_CHANNEL( chan ) {
         pred[chan] = NULL;
      }
      return;
   }

   swizzles[0] = inst->Predicate.SwizzleX;
   swizzles[1] = inst->Predicate.SwizzleY;
   swizzles[2] = inst->Predicate.SwizzleZ;
   swizzles[3] = inst->Predicate.SwizzleW;

   index = inst->Predicate.Index;
   assert(index < LP_MAX_TGSI_PREDS);

   TGSI_FOR_EACH_CHANNEL( chan ) {
      unsigned swizzle = swizzles[chan];

      /*
       * Only fetch the predicate register channels that are actually listed
       * in the swizzles
       */
      if (!unswizzled[swizzle]) {
         value = LLVMBuildLoad(builder,
                               bld->preds[index][swizzle], "");

         /*
          * Convert the value to an integer mask.
          *
          * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
          * is needlessly causing two comparisons due to storing the intermediate
          * result as float vector instead of an integer mask vector.
          */
         value = lp_build_compare(bld->bld_base.base.gallivm,
                                  bld->bld_base.base.type,
                                  PIPE_FUNC_NOTEQUAL,
                                  value,
                                  bld->bld_base.base.zero);
         if (inst->Predicate.Negate) {
            value = LLVMBuildNot(builder, value, "");
         }

         /* cache the converted mask so repeated swizzles reuse it */
         unswizzled[swizzle] = value;
      } else {
         value = unswizzled[swizzle];
      }

      pred[chan] = value;
   }
}
1047
1048 /**
1049 * Register store.
1050 */
1051 static void
1052 emit_store_chan(
1053 struct lp_build_tgsi_context *bld_base,
1054 const struct tgsi_full_instruction *inst,
1055 unsigned index,
1056 unsigned chan_index,
1057 LLVMValueRef pred,
1058 LLVMValueRef value)
1059 {
1060 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1061 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1062 LLVMBuilderRef builder = gallivm->builder;
1063 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
1064 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1065 LLVMValueRef indirect_index = NULL;
1066 struct lp_build_context *bld_store;
1067 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
1068
1069 switch (dtype) {
1070 default:
1071 case TGSI_TYPE_FLOAT:
1072 case TGSI_TYPE_UNTYPED:
1073 bld_store = &bld_base->base;
1074 break;
1075 case TGSI_TYPE_UNSIGNED:
1076 bld_store = &bld_base->uint_bld;
1077 break;
1078 case TGSI_TYPE_SIGNED:
1079 bld_store = &bld_base->int_bld;
1080 break;
1081 case TGSI_TYPE_DOUBLE:
1082 case TGSI_TYPE_VOID:
1083 assert(0);
1084 bld_store = NULL;
1085 break;
1086 }
1087
1088 /* If the destination is untyped then the source can be anything,
1089 * but LLVM won't like if the types don't match so lets cast
1090 * to the correct destination type as expected by LLVM. */
1091 if (dtype == TGSI_TYPE_UNTYPED &&
1092 !lp_check_vec_type(bld_store->type, LLVMTypeOf(value))) {
1093 value = LLVMBuildBitCast(builder, value, bld_store->vec_type,
1094 "src_casted");
1095 }
1096
1097 switch( inst->Instruction.Saturate ) {
1098 case TGSI_SAT_NONE:
1099 break;
1100
1101 case TGSI_SAT_ZERO_ONE:
1102 value = lp_build_max(&bld->bld_base.base, value, bld->bld_base.base.zero);
1103 value = lp_build_min(&bld->bld_base.base, value, bld->bld_base.base.one);
1104 break;
1105
1106 case TGSI_SAT_MINUS_PLUS_ONE:
1107 value = lp_build_max(&bld->bld_base.base, value, lp_build_const_vec(bld->bld_base.base.gallivm, bld->bld_base.base.type, -1.0));
1108 value = lp_build_min(&bld->bld_base.base, value, bld->bld_base.base.one);
1109 break;
1110
1111 default:
1112 assert(0);
1113 }
1114
1115 if (reg->Register.Indirect) {
1116 indirect_index = get_indirect_index(bld,
1117 reg->Register.File,
1118 reg->Register.Index,
1119 &reg->Indirect);
1120 } else {
1121 assert(reg->Register.Index <=
1122 bld->bld_base.info->file_max[reg->Register.File]);
1123 }
1124
1125 switch( reg->Register.File ) {
1126 case TGSI_FILE_OUTPUT:
1127 if (reg->Register.Indirect) {
1128 LLVMValueRef chan_vec =
1129 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
1130 LLVMValueRef length_vec =
1131 lp_build_const_int_vec(gallivm, uint_bld->type, bld->bld_base.base.type.length);
1132 LLVMValueRef index_vec; /* indexes into the temp registers */
1133 LLVMValueRef outputs_array;
1134 LLVMValueRef pixel_offsets;
1135 LLVMTypeRef float_ptr_type;
1136 int i;
1137
1138 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1139 pixel_offsets = uint_bld->undef;
1140 for (i = 0; i < bld->bld_base.base.type.length; i++) {
1141 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1142 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
1143 ii, ii, "");
1144 }
1145
1146 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1147 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1148 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1149 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1150 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1151
1152 float_ptr_type =
1153 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1154 outputs_array = LLVMBuildBitCast(builder, bld->outputs_array,
1155 float_ptr_type, "");
1156
1157 /* Scatter store values into temp registers */
1158 emit_mask_scatter(bld, outputs_array, index_vec, value,
1159 &bld->exec_mask, pred);
1160 }
1161 else {
1162 LLVMValueRef out_ptr = lp_get_output_ptr(bld, reg->Register.Index,
1163 chan_index);
1164 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value, out_ptr);
1165 }
1166 break;
1167
1168 case TGSI_FILE_TEMPORARY:
1169 if (reg->Register.Indirect) {
1170 LLVMValueRef chan_vec =
1171 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
1172 LLVMValueRef length_vec =
1173 lp_build_const_int_vec(gallivm, uint_bld->type,
1174 bld->bld_base.base.type.length);
1175 LLVMValueRef index_vec; /* indexes into the temp registers */
1176 LLVMValueRef temps_array;
1177 LLVMValueRef pixel_offsets;
1178 LLVMTypeRef float_ptr_type;
1179 int i;
1180
1181 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1182 pixel_offsets = uint_bld->undef;
1183 for (i = 0; i < bld->bld_base.base.type.length; i++) {
1184 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1185 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
1186 ii, ii, "");
1187 }
1188
1189 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1190 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1191 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1192 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1193 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1194
1195 float_ptr_type =
1196 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1197 temps_array = LLVMBuildBitCast(builder, bld->temps_array,
1198 float_ptr_type, "");
1199
1200 /* Scatter store values into temp registers */
1201 emit_mask_scatter(bld, temps_array, index_vec, value,
1202 &bld->exec_mask, pred);
1203 }
1204 else {
1205 LLVMValueRef temp_ptr;
1206
1207 switch (dtype) {
1208 case TGSI_TYPE_UNSIGNED:
1209 case TGSI_TYPE_SIGNED: {
1210 LLVMTypeRef itype = bld_base->int_bld.vec_type;
1211 LLVMTypeRef ivtype = LLVMPointerType(itype, 0);
1212 LLVMValueRef tint_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index,
1213 chan_index);
1214 LLVMValueRef temp_value_ptr;
1215
1216 temp_ptr = LLVMBuildBitCast(builder, tint_ptr, ivtype, "");
1217 temp_value_ptr = LLVMBuildBitCast(builder, value, itype, "");
1218 value = temp_value_ptr;
1219 break;
1220 }
1221 default:
1222 case TGSI_TYPE_FLOAT:
1223 case TGSI_TYPE_UNTYPED:
1224 temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index,
1225 chan_index);
1226 break;
1227 }
1228
1229 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value, temp_ptr);
1230 }
1231 break;
1232
1233 case TGSI_FILE_ADDRESS:
1234 assert(dtype == TGSI_TYPE_SIGNED);
1235 assert(LLVMTypeOf(value) == bld_base->base.int_vec_type);
1236 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value,
1237 bld->addr[reg->Register.Index][chan_index]);
1238 break;
1239
1240 case TGSI_FILE_PREDICATE:
1241 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value,
1242 bld->preds[reg->Register.Index][chan_index]);
1243 break;
1244
1245 default:
1246 assert( 0 );
1247 }
1248 }
1249
1250 static void
1251 emit_store(
1252 struct lp_build_tgsi_context * bld_base,
1253 const struct tgsi_full_instruction * inst,
1254 const struct tgsi_opcode_info * info,
1255 LLVMValueRef dst[4])
1256
1257 {
1258 unsigned chan_index;
1259 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1260
1261 if(info->num_dst) {
1262 LLVMValueRef pred[TGSI_NUM_CHANNELS];
1263
1264 emit_fetch_predicate( bld, inst, pred );
1265
1266 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1267 emit_store_chan(bld_base, inst, 0, chan_index, pred[chan_index], dst[chan_index]);
1268 }
1269 }
1270 }
1271
1272 /**
1273 * High-level instruction translators.
1274 */
1275
/**
 * Emit code for a legacy texture sampling opcode (TEX/TXB/TXL/TXD/TXP).
 *
 * Fetches coordinates (and, depending on \p modifier, lod bias, explicit
 * lod, projection divisor or explicit derivatives) from the instruction's
 * sources, then delegates the actual fetch to the sampler generator.
 *
 * \param modifier  which extra tex parameter the opcode carries
 * \param texel     receives the four result channels
 */
static void
emit_tex( struct lp_build_tgsi_soa_context *bld,
          const struct tgsi_full_instruction *inst,
          enum lp_build_tex_modifier modifier,
          LLVMValueRef *texel)
{
   unsigned unit;
   LLVMValueRef lod_bias, explicit_lod;
   LLVMValueRef oow = NULL;
   LLVMValueRef coords[4];
   LLVMValueRef offsets[3] = { NULL };
   struct lp_derivatives derivs;
   struct lp_derivatives *deriv_ptr = NULL;
   unsigned num_coords, num_derivs, num_offsets;
   unsigned i;

   if (!bld->sampler) {
      /* No sampler generator: return undefs rather than crashing. */
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = bld->bld_base.base.undef;
      }
      return;
   }

   /* Per-target coordinate / offset / derivative counts. */
   switch (inst->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      num_coords = 1;
      num_offsets = 1;
      num_derivs = 1;
      break;
   case TGSI_TEXTURE_1D_ARRAY:
      num_coords = 2;
      num_offsets = 1;
      num_derivs = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      num_coords = 2;
      num_offsets = 2;
      num_derivs = 2;
      break;
   case TGSI_TEXTURE_SHADOW1D:
   case TGSI_TEXTURE_SHADOW1D_ARRAY:
      num_coords = 3;
      num_offsets = 1;
      num_derivs = 1;
      break;
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
   case TGSI_TEXTURE_2D_ARRAY:
      num_coords = 3;
      num_offsets = 2;
      num_derivs = 2;
      break;
   case TGSI_TEXTURE_CUBE:
      num_coords = 3;
      num_offsets = 2;
      num_derivs = 3;
      break;
   case TGSI_TEXTURE_3D:
      num_coords = 3;
      num_offsets = 3;
      num_derivs = 3;
      break;
   case TGSI_TEXTURE_SHADOW2D_ARRAY:
      num_coords = 4;
      num_offsets = 2;
      num_derivs = 2;
      break;
   case TGSI_TEXTURE_SHADOWCUBE:
      num_coords = 4;
      num_offsets = 2;
      num_derivs = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* Note lod and especially projected are illegal in a LOT of cases */
   if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
      assert(num_coords < 4);
      lod_bias = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
      explicit_lod = NULL;
   }
   else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
      assert(num_coords < 4);
      lod_bias = NULL;
      explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
   }
   else {
      lod_bias = NULL;
      explicit_lod = NULL;
   }

   if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
      /* TXP: divide coords by w (multiply by reciprocal of src0.w). */
      assert(num_coords < 4);
      oow = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
      oow = lp_build_rcp(&bld->bld_base.base, oow);
   }

   for (i = 0; i < num_coords; i++) {
      coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
      if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
         coords[i] = lp_build_mul(&bld->bld_base.base, coords[i], oow);
   }
   for (i = num_coords; i < 4; i++) {
      coords[i] = bld->bld_base.base.undef;
   }

   if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
      /* TXD: derivatives come from src1/src2, sampler unit from src3. */
      unsigned dim;
      for (dim = 0; dim < num_derivs; ++dim) {
         derivs.ddx[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 1, dim );
         derivs.ddy[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 2, dim );
      }
      deriv_ptr = &derivs;
      unit = inst->Src[3].Register.Index;
   } else {
      unit = inst->Src[1].Register.Index;
   }

   /* some advanced gather instructions (txgo) would require 4 offsets */
   if (inst->Texture.NumOffsets == 1) {
      unsigned dim;
      for (dim = 0; dim < num_offsets; dim++) {
         offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
      }
   }

   bld->sampler->emit_fetch_texel(bld->sampler,
                                  bld->bld_base.base.gallivm,
                                  bld->bld_base.base.type,
                                  FALSE,
                                  unit, unit,
                                  coords,
                                  offsets,
                                  deriv_ptr,
                                  lod_bias, explicit_lod,
                                  texel);
}
1417
1418 static void
1419 emit_sample(struct lp_build_tgsi_soa_context *bld,
1420 const struct tgsi_full_instruction *inst,
1421 enum lp_build_tex_modifier modifier,
1422 boolean compare,
1423 LLVMValueRef *texel)
1424 {
1425 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1426 unsigned texture_unit, sampler_unit;
1427 LLVMValueRef lod_bias, explicit_lod;
1428 LLVMValueRef coords[4];
1429 LLVMValueRef offsets[3] = { NULL };
1430 struct lp_derivatives derivs;
1431 struct lp_derivatives *deriv_ptr = NULL;
1432 unsigned num_coords, num_offsets, num_derivs;
1433 unsigned i;
1434
1435 if (!bld->sampler) {
1436 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1437 for (i = 0; i < 4; i++) {
1438 texel[i] = bld->bld_base.base.undef;
1439 }
1440 return;
1441 }
1442
1443 /*
1444 * unlike old-style tex opcodes the texture/sampler indices
1445 * always come from src1 and src2 respectively.
1446 */
1447 texture_unit = inst->Src[1].Register.Index;
1448 sampler_unit = inst->Src[2].Register.Index;
1449
1450 /*
1451 * Note inst->Texture.Texture will contain the number of offsets,
1452 * however the target information is NOT there and comes from the
1453 * declared sampler views instead.
1454 */
1455 switch (bld->sv[texture_unit].Resource) {
1456 case TGSI_TEXTURE_1D:
1457 num_coords = 1;
1458 num_offsets = 1;
1459 num_derivs = 1;
1460 break;
1461 case TGSI_TEXTURE_1D_ARRAY:
1462 num_coords = 2;
1463 num_offsets = 1;
1464 num_derivs = 1;
1465 break;
1466 case TGSI_TEXTURE_2D:
1467 case TGSI_TEXTURE_RECT:
1468 num_coords = 2;
1469 num_offsets = 2;
1470 num_derivs = 2;
1471 break;
1472 case TGSI_TEXTURE_2D_ARRAY:
1473 num_coords = 3;
1474 num_offsets = 2;
1475 num_derivs = 2;
1476 break;
1477 case TGSI_TEXTURE_CUBE:
1478 num_coords = 3;
1479 num_offsets = 2;
1480 num_derivs = 3;
1481 break;
1482 case TGSI_TEXTURE_3D:
1483 num_coords = 3;
1484 num_offsets = 3;
1485 num_derivs = 3;
1486 break;
1487 case TGSI_TEXTURE_CUBE_ARRAY:
1488 num_coords = 4;
1489 num_offsets = 2;
1490 num_derivs = 3;
1491 break;
1492 default:
1493 assert(0);
1494 return;
1495 }
1496
1497 /*
1498 * unlike old-style tex opcodes the texture/sampler indices
1499 * always come from src1 and src2 respectively.
1500 */
1501 texture_unit = inst->Src[1].Register.Index;
1502 sampler_unit = inst->Src[2].Register.Index;
1503
1504 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
1505 lod_bias = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1506 explicit_lod = NULL;
1507 }
1508 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
1509 lod_bias = NULL;
1510 explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1511 }
1512 else if (modifier == LP_BLD_TEX_MODIFIER_LOD_ZERO) {
1513 lod_bias = NULL;
1514 /* XXX might be better to explicitly pass the level zero information */
1515 explicit_lod = lp_build_const_vec(gallivm, bld->bld_base.base.type, 0.0F);
1516 }
1517 else {
1518 lod_bias = NULL;
1519 explicit_lod = NULL;
1520 }
1521
1522 for (i = 0; i < num_coords; i++) {
1523 coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
1524 }
1525 for (i = num_coords; i < 4; i++) {
1526 coords[i] = bld->bld_base.base.undef;
1527 }
1528 /*
1529 * XXX: whack shadow comparison value into place.
1530 * Should probably fix the interface for separate value
1531 * (it will not work for cube arrays if it is part of coords).
1532 */
1533 if (compare) {
1534 unsigned c_coord = num_coords > 2 ? 3 : 2;
1535 assert(num_coords < 4);
1536 coords[c_coord] = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1537 }
1538
1539 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
1540 unsigned dim;
1541 for (dim = 0; dim < num_derivs; ++dim) {
1542 derivs.ddx[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 3, dim );
1543 derivs.ddy[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 4, dim );
1544 }
1545 deriv_ptr = &derivs;
1546 }
1547
1548 /* some advanced gather instructions (txgo) would require 4 offsets */
1549 if (inst->Texture.NumOffsets == 1) {
1550 unsigned dim;
1551 for (dim = 0; dim < num_offsets; dim++) {
1552 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
1553 }
1554 }
1555
1556 bld->sampler->emit_fetch_texel(bld->sampler,
1557 bld->bld_base.base.gallivm,
1558 bld->bld_base.base.type,
1559 FALSE,
1560 texture_unit, sampler_unit,
1561 coords,
1562 offsets,
1563 deriv_ptr,
1564 lod_bias, explicit_lod,
1565 texel);
1566 }
1567
/**
 * Emit code for TXF / SAMPLE_I (texel fetch with integer coordinates).
 *
 * \param is_samplei  TRUE for SAMPLE_I (target from the declared sampler
 *                    view), FALSE for TXF (target from the instruction)
 * \param texel       receives the four result channels
 */
static void
emit_fetch_texels( struct lp_build_tgsi_soa_context *bld,
                   const struct tgsi_full_instruction *inst,
                   LLVMValueRef *texel,
                   boolean is_samplei)
{
   unsigned unit, target;
   LLVMValueRef coord_undef = LLVMGetUndef(bld->bld_base.base.int_vec_type);
   LLVMValueRef explicit_lod = NULL;
   LLVMValueRef coords[3];
   LLVMValueRef offsets[3] = { NULL };
   unsigned num_coords;
   unsigned dims;
   unsigned i;

   if (!bld->sampler) {
      /* No sampler generator: return undefs rather than crashing. */
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = coord_undef;
      }
      return;
   }

   unit = inst->Src[1].Register.Index;

   if (is_samplei) {
      target = bld->sv[unit].Resource;
   }
   else {
      target = inst->Texture.Texture;
   }

   /* Per-target coordinate count and offset dimensionality. */
   switch (target) {
   case TGSI_TEXTURE_1D:
   case TGSI_TEXTURE_BUFFER:
      num_coords = 1;
      dims = 1;
      break;
   case TGSI_TEXTURE_1D_ARRAY:
      num_coords = 2;
      dims = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      num_coords = 2;
      dims = 2;
      break;
   case TGSI_TEXTURE_2D_ARRAY:
      num_coords = 3;
      dims = 2;
      break;
   case TGSI_TEXTURE_3D:
      num_coords = 3;
      dims = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* always have lod except for buffers ? */
   if (target != TGSI_TEXTURE_BUFFER) {
      explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
   }

   for (i = 0; i < num_coords; i++) {
      coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
   }
   for (i = num_coords; i < 3; i++) {
      coords[i] = coord_undef;
   }

   if (inst->Texture.NumOffsets == 1) {
      unsigned dim;
      for (dim = 0; dim < dims; dim++) {
         offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
      }
   }

   /* TRUE here requests a texel fetch (nearest, no filtering). */
   bld->sampler->emit_fetch_texel(bld->sampler,
                                  bld->bld_base.base.gallivm,
                                  bld->bld_base.base.type,
                                  TRUE,
                                  unit, unit,
                                  coords,
                                  offsets,
                                  NULL,
                                  NULL, explicit_lod,
                                  texel);
}
1658
/**
 * Emit code for TXQ / SVIEWINFO (texture size query).
 *
 * \param is_sviewinfo  TRUE for SVIEWINFO (target from the declared
 *                      sampler view), FALSE for TXQ (from instruction)
 * \param sizes_out     receives up to four size components
 */
static void
emit_size_query( struct lp_build_tgsi_soa_context *bld,
                 const struct tgsi_full_instruction *inst,
                 LLVMValueRef *sizes_out,
                 boolean is_sviewinfo)
{
   LLVMValueRef explicit_lod;
   unsigned has_lod;
   unsigned i;
   unsigned unit = inst->Src[1].Register.Index;
   unsigned target;

   if (is_sviewinfo) {
      target = bld->sv[unit].Resource;
   }
   else {
      target = inst->Texture.Texture;
   }
   /* Buffers and rect textures have no mip levels, hence no lod argument. */
   switch (target) {
   case TGSI_TEXTURE_BUFFER:
   case TGSI_TEXTURE_RECT:
   case TGSI_TEXTURE_SHADOWRECT:
      has_lod = 0;
      break;
   default:
      has_lod = 1;
      break;
   }

   if (!bld->sampler) {
      /* No sampler generator: return undefs rather than crashing. */
      _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++)
         sizes_out[i] = bld->bld_base.int_bld.undef;
      return;
   }

   if (has_lod)
      explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 0 );
   else
      explicit_lod = NULL;

   bld->sampler->emit_size_query(bld->sampler,
                                 bld->bld_base.base.gallivm,
                                 bld->bld_base.int_bld.type,
                                 unit,
                                 is_sviewinfo,
                                 explicit_lod,
                                 sizes_out);
}
1708
1709 static boolean
1710 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
1711 int pc)
1712 {
1713 int i;
1714
1715 for (i = 0; i < 5; i++) {
1716 unsigned opcode;
1717
1718 if (pc + i >= bld->bld_base.info->num_instructions)
1719 return TRUE;
1720
1721 opcode = bld->bld_base.instructions[pc + i].Instruction.Opcode;
1722
1723 if (opcode == TGSI_OPCODE_END)
1724 return TRUE;
1725
1726 if (opcode == TGSI_OPCODE_TEX ||
1727 opcode == TGSI_OPCODE_TXP ||
1728 opcode == TGSI_OPCODE_TXD ||
1729 opcode == TGSI_OPCODE_TXB ||
1730 opcode == TGSI_OPCODE_TXL ||
1731 opcode == TGSI_OPCODE_TXF ||
1732 opcode == TGSI_OPCODE_TXQ ||
1733 opcode == TGSI_OPCODE_CAL ||
1734 opcode == TGSI_OPCODE_CALLNZ ||
1735 opcode == TGSI_OPCODE_IF ||
1736 opcode == TGSI_OPCODE_UIF ||
1737 opcode == TGSI_OPCODE_BGNLOOP ||
1738 opcode == TGSI_OPCODE_SWITCH)
1739 return FALSE;
1740 }
1741
1742 return TRUE;
1743 }
1744
1745
1746
1747 /**
1748 * Kill fragment if any of the src register values are negative.
1749 */
1750 static void
1751 emit_kil(
1752 struct lp_build_tgsi_soa_context *bld,
1753 const struct tgsi_full_instruction *inst,
1754 int pc)
1755 {
1756 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1757 const struct tgsi_full_src_register *reg = &inst->Src[0];
1758 LLVMValueRef terms[TGSI_NUM_CHANNELS];
1759 LLVMValueRef mask;
1760 unsigned chan_index;
1761
1762 memset(&terms, 0, sizeof terms);
1763
1764 TGSI_FOR_EACH_CHANNEL( chan_index ) {
1765 unsigned swizzle;
1766
1767 /* Unswizzle channel */
1768 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
1769
1770 /* Check if the component has not been already tested. */
1771 assert(swizzle < TGSI_NUM_CHANNELS);
1772 if( !terms[swizzle] )
1773 /* TODO: change the comparison operator instead of setting the sign */
1774 terms[swizzle] = lp_build_emit_fetch(&bld->bld_base, inst, 0, chan_index );
1775 }
1776
1777 mask = NULL;
1778 TGSI_FOR_EACH_CHANNEL( chan_index ) {
1779 if(terms[chan_index]) {
1780 LLVMValueRef chan_mask;
1781
1782 /*
1783 * If term < 0 then mask = 0 else mask = ~0.
1784 */
1785 chan_mask = lp_build_cmp(&bld->bld_base.base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->bld_base.base.zero);
1786
1787 if(mask)
1788 mask = LLVMBuildAnd(builder, mask, chan_mask, "");
1789 else
1790 mask = chan_mask;
1791 }
1792 }
1793
1794 if(mask) {
1795 lp_build_mask_update(bld->mask, mask);
1796
1797 if (!near_end_of_shader(bld, pc))
1798 lp_build_mask_check(bld->mask);
1799 }
1800 }
1801
1802
1803 /**
1804 * Predicated fragment kill.
1805 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1806 * The only predication is the execution mask which will apply if
1807 * we're inside a loop or conditional.
1808 */
1809 static void
1810 emit_kilp(struct lp_build_tgsi_soa_context *bld,
1811 int pc)
1812 {
1813 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1814 LLVMValueRef mask;
1815
1816 /* For those channels which are "alive", disable fragment shader
1817 * execution.
1818 */
1819 if (bld->exec_mask.has_mask) {
1820 mask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
1821 }
1822 else {
1823 LLVMValueRef zero = LLVMConstNull(bld->bld_base.base.int_vec_type);
1824 mask = zero;
1825 }
1826
1827 lp_build_mask_update(bld->mask, mask);
1828
1829 if (!near_end_of_shader(bld, pc))
1830 lp_build_mask_check(bld->mask);
1831 }
1832
1833
1834 /**
1835 * Emit code which will dump the value of all the temporary registers
1836 * to stdout.
1837 */
1838 static void
1839 emit_dump_temps(struct lp_build_tgsi_soa_context *bld)
1840 {
1841 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1842 LLVMBuilderRef builder = gallivm->builder;
1843 LLVMValueRef temp_ptr;
1844 LLVMValueRef i0 = lp_build_const_int32(gallivm, 0);
1845 LLVMValueRef i1 = lp_build_const_int32(gallivm, 1);
1846 LLVMValueRef i2 = lp_build_const_int32(gallivm, 2);
1847 LLVMValueRef i3 = lp_build_const_int32(gallivm, 3);
1848 int index;
1849 int n = bld->bld_base.info->file_max[TGSI_FILE_TEMPORARY];
1850
1851 for (index = 0; index < n; index++) {
1852 LLVMValueRef idx = lp_build_const_int32(gallivm, index);
1853 LLVMValueRef v[4][4], res;
1854 int chan;
1855
1856 lp_build_printf(gallivm, "TEMP[%d]:\n", idx);
1857
1858 for (chan = 0; chan < 4; chan++) {
1859 temp_ptr = lp_get_temp_ptr_soa(bld, index, chan);
1860 res = LLVMBuildLoad(builder, temp_ptr, "");
1861 v[chan][0] = LLVMBuildExtractElement(builder, res, i0, "");
1862 v[chan][1] = LLVMBuildExtractElement(builder, res, i1, "");
1863 v[chan][2] = LLVMBuildExtractElement(builder, res, i2, "");
1864 v[chan][3] = LLVMBuildExtractElement(builder, res, i3, "");
1865 }
1866
1867 lp_build_printf(gallivm, " X: %f %f %f %f\n",
1868 v[0][0], v[0][1], v[0][2], v[0][3]);
1869 lp_build_printf(gallivm, " Y: %f %f %f %f\n",
1870 v[1][0], v[1][1], v[1][2], v[1][3]);
1871 lp_build_printf(gallivm, " Z: %f %f %f %f\n",
1872 v[2][0], v[2][1], v[2][2], v[2][3]);
1873 lp_build_printf(gallivm, " W: %f %f %f %f\n",
1874 v[3][0], v[3][1], v[3][2], v[3][3]);
1875 }
1876 }
1877
1878
1879
/**
 * Process a TGSI register declaration: allocate per-channel storage
 * (allocas) for the declared register range, or record sampler-view
 * info for later use by the sample/tex translators.
 *
 * Registers in files accessed indirectly are skipped here -- presumably
 * they use the flat temps/outputs arrays instead (TODO confirm against
 * the setup code outside this view).
 */
void
lp_emit_declaration_soa(
   struct lp_build_tgsi_context *bld_base,
   const struct tgsi_full_declaration *decl)
{
   struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
   struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
   LLVMTypeRef vec_type = bld->bld_base.base.vec_type;
   const unsigned first = decl->Range.First;
   const unsigned last = decl->Range.Last;
   unsigned idx, i;

   for (idx = first; idx <= last; ++idx) {
      assert(last <= bld->bld_base.info->file_max[decl->Declaration.File]);
      switch (decl->Declaration.File) {
      case TGSI_FILE_TEMPORARY:
         assert(idx < LP_MAX_TGSI_TEMPS);
         if (!(bld->indirect_files & (1 << TGSI_FILE_TEMPORARY))) {
            for (i = 0; i < TGSI_NUM_CHANNELS; i++)
               bld->temps[idx][i] = lp_build_alloca(gallivm, vec_type, "temp");
         }
         break;

      case TGSI_FILE_OUTPUT:
         if (!(bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
            for (i = 0; i < TGSI_NUM_CHANNELS; i++)
               bld->outputs[idx][i] = lp_build_alloca(gallivm,
                                                      vec_type, "output");
         }
         break;

      case TGSI_FILE_ADDRESS:
         /* ADDR registers are only allocated with an integer LLVM IR type,
          * as they are guaranteed to always have integers.
          * XXX: Not sure if this exception is worthwhile (or the whole idea of
          * an ADDR register for that matter).
          */
         assert(idx < LP_MAX_TGSI_ADDRS);
         for (i = 0; i < TGSI_NUM_CHANNELS; i++)
            bld->addr[idx][i] = lp_build_alloca(gallivm, bld_base->base.int_vec_type, "addr");
         break;

      case TGSI_FILE_PREDICATE:
         assert(idx < LP_MAX_TGSI_PREDS);
         for (i = 0; i < TGSI_NUM_CHANNELS; i++)
            bld->preds[idx][i] = lp_build_alloca(gallivm, vec_type,
                                                 "predicate");
         break;

      case TGSI_FILE_SAMPLER_VIEW:
         /*
          * The target stored here MUST match whatever there actually
          * is in the set sampler views (what about return type?).
          */
         assert(idx < PIPE_MAX_SHADER_SAMPLER_VIEWS);
         bld->sv[idx] = decl->SamplerView;
         break;

      default:
         /* don't need to declare other vars */
         break;
      }
   }
}
1944
1945
/**
 * Copy an immediate's values into the next immediates[] slot.
 *
 * Integer immediates are built with the matching int/uint type and then
 * bitcast to the float vector type, since immediates[] stores everything
 * as the base (float) vector type.  Unused trailing channels are undef.
 */
void lp_emit_immediate_soa(
   struct lp_build_tgsi_context *bld_base,
   const struct tgsi_full_immediate *imm)
{
   struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
   struct gallivm_state * gallivm = bld_base->base.gallivm;

   /* simply copy the immediate values into the next immediates[] slot */
   unsigned i;
   const uint size = imm->Immediate.NrTokens - 1;
   assert(size <= 4);
   assert(bld->num_immediates < LP_MAX_TGSI_IMMEDIATES);
   switch (imm->Immediate.DataType) {
   case TGSI_IMM_FLOAT32:
      for( i = 0; i < size; ++i )
         bld->immediates[bld->num_immediates][i] =
            lp_build_const_vec(gallivm, bld_base->base.type, imm->u[i].Float);

      break;
   case TGSI_IMM_UINT32:
      for( i = 0; i < size; ++i ) {
         LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->uint_bld.type, imm->u[i].Uint);
         bld->immediates[bld->num_immediates][i] =
            LLVMConstBitCast(tmp, bld_base->base.vec_type);
      }

      break;
   case TGSI_IMM_INT32:
      for( i = 0; i < size; ++i ) {
         LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->int_bld.type, imm->u[i].Int);
         bld->immediates[bld->num_immediates][i] =
            LLVMConstBitCast(tmp, bld_base->base.vec_type);
      }

      break;
   }
   for( i = size; i < 4; ++i )
      bld->immediates[bld->num_immediates][i] = bld_base->base.undef;

   bld->num_immediates++;
}
1987
/* TGSI_OPCODE_DDX action: derivative of the source along x. */
static void
ddx_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_fetch_deriv(bld, emit_data->args[0], NULL,
                    &emit_data->output[emit_data->chan], NULL);
}
1999
/* TGSI_OPCODE_DDY action: derivative of the source along y. */
static void
ddy_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_fetch_deriv(bld, emit_data->args[0], NULL, NULL,
                    &emit_data->output[emit_data->chan]);
}
2011
/* TGSI_OPCODE_KILP action: unconditional (predicated) fragment kill. */
static void
kilp_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   /* pc - 1: the pc has already advanced past this instruction */
   emit_kilp(bld, bld_base->pc - 1);
}
2022
/* TGSI_OPCODE_KIL action: kill fragment if any src component is negative. */
static void
kil_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   /* pc - 1: the pc has already advanced past this instruction */
   emit_kil(bld, emit_data->inst, bld_base->pc - 1);
}
2033
/* TGSI_OPCODE_TEX action: plain texture sample. */
static void
tex_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE, emit_data->output);
}
2044
/* TGSI_OPCODE_TXB action: texture sample with lod bias. */
static void
txb_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
            emit_data->output);
}
2056
/* TGSI_OPCODE_TXD action: texture sample with explicit derivatives. */
static void
txd_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
            emit_data->output);
}
2068
/* TGSI_OPCODE_TXL action: texture sample with explicit lod. */
static void
txl_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
            emit_data->output);
}
2080
/* TGSI_OPCODE_TXP action: projected texture sample. */
static void
txp_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_PROJECTED,
            emit_data->output);
}
2092
/* TGSI_OPCODE_TXQ action: texture size query. */
static void
txq_emit(
   const struct lp_build_tgsi_action * action,
   struct lp_build_tgsi_context * bld_base,
   struct lp_build_emit_data * emit_data)
{
   struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);

   emit_size_query(bld, emit_data->inst, emit_data->output, FALSE);
}
2103
2104 static void
2105 txf_emit(
2106 const struct lp_build_tgsi_action * action,
2107 struct lp_build_tgsi_context * bld_base,
2108 struct lp_build_emit_data * emit_data)
2109 {
2110 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2111
2112 emit_fetch_texels(bld, emit_data->inst, emit_data->output, FALSE);
2113 }
2114
2115 static void
2116 sample_i_emit(
2117 const struct lp_build_tgsi_action * action,
2118 struct lp_build_tgsi_context * bld_base,
2119 struct lp_build_emit_data * emit_data)
2120 {
2121 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2122
2123 emit_fetch_texels(bld, emit_data->inst, emit_data->output, TRUE);
2124 }
2125
2126 static void
2127 sample_emit(
2128 const struct lp_build_tgsi_action * action,
2129 struct lp_build_tgsi_context * bld_base,
2130 struct lp_build_emit_data * emit_data)
2131 {
2132 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2133
2134 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
2135 FALSE, emit_data->output);
2136 }
2137
2138 static void
2139 sample_b_emit(
2140 const struct lp_build_tgsi_action * action,
2141 struct lp_build_tgsi_context * bld_base,
2142 struct lp_build_emit_data * emit_data)
2143 {
2144 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2145
2146 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
2147 FALSE, emit_data->output);
2148 }
2149
2150 static void
2151 sample_c_emit(
2152 const struct lp_build_tgsi_action * action,
2153 struct lp_build_tgsi_context * bld_base,
2154 struct lp_build_emit_data * emit_data)
2155 {
2156 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2157
2158 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
2159 TRUE, emit_data->output);
2160 }
2161
2162 static void
2163 sample_c_lz_emit(
2164 const struct lp_build_tgsi_action * action,
2165 struct lp_build_tgsi_context * bld_base,
2166 struct lp_build_emit_data * emit_data)
2167 {
2168 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2169
2170 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_ZERO,
2171 TRUE, emit_data->output);
2172 }
2173
2174 static void
2175 sample_d_emit(
2176 const struct lp_build_tgsi_action * action,
2177 struct lp_build_tgsi_context * bld_base,
2178 struct lp_build_emit_data * emit_data)
2179 {
2180 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2181
2182 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
2183 FALSE, emit_data->output);
2184 }
2185
2186 static void
2187 sample_l_emit(
2188 const struct lp_build_tgsi_action * action,
2189 struct lp_build_tgsi_context * bld_base,
2190 struct lp_build_emit_data * emit_data)
2191 {
2192 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2193
2194 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
2195 FALSE, emit_data->output);
2196 }
2197
2198 static void
2199 sviewinfo_emit(
2200 const struct lp_build_tgsi_action * action,
2201 struct lp_build_tgsi_context * bld_base,
2202 struct lp_build_emit_data * emit_data)
2203 {
2204 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2205
2206 emit_size_query(bld, emit_data->inst, emit_data->output, TRUE);
2207 }
2208
2209 static LLVMValueRef
2210 mask_to_one_vec(struct lp_build_tgsi_context *bld_base)
2211 {
2212 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2213 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2214 LLVMValueRef one_vec = bld_base->int_bld.one;
2215 struct lp_exec_mask *exec_mask = &bld->exec_mask;
2216
2217 if (exec_mask->has_mask) {
2218 one_vec = LLVMBuildAnd(builder, one_vec, exec_mask->exec_mask, "");
2219 }
2220 one_vec = LLVMBuildAnd(builder, one_vec,
2221 lp_build_mask_value(bld->mask), "");
2222 return one_vec;
2223 }
2224
2225 static void
2226 increment_vec_ptr_by_mask(struct lp_build_tgsi_context * bld_base,
2227 LLVMValueRef ptr,
2228 LLVMValueRef mask)
2229 {
2230 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
2231
2232 LLVMValueRef current_vec = LLVMBuildLoad(builder, ptr, "");
2233
2234 current_vec = LLVMBuildAdd(builder, current_vec, mask, "");
2235
2236 LLVMBuildStore(builder, current_vec, ptr);
2237 }
2238
2239 static void
2240 clear_uint_vec_ptr_from_mask(struct lp_build_tgsi_context * bld_base,
2241 LLVMValueRef ptr,
2242 LLVMValueRef mask)
2243 {
2244 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
2245
2246 LLVMValueRef current_vec = LLVMBuildLoad(builder, ptr, "");
2247 LLVMValueRef full_mask = lp_build_cmp(&bld_base->uint_bld,
2248 PIPE_FUNC_NOTEQUAL,
2249 mask,
2250 bld_base->uint_bld.zero);
2251
2252 current_vec = lp_build_select(&bld_base->uint_bld,
2253 full_mask,
2254 bld_base->uint_bld.zero,
2255 current_vec);
2256
2257 LLVMBuildStore(builder, current_vec, ptr);
2258 }
2259
2260 static LLVMValueRef
2261 clamp_mask_to_max_output_vertices(struct lp_build_tgsi_soa_context * bld,
2262 LLVMValueRef current_mask_vec,
2263 LLVMValueRef total_emitted_vertices_vec)
2264 {
2265 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2266 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
2267 LLVMValueRef max_mask = lp_build_cmp(uint_bld, PIPE_FUNC_LESS,
2268 total_emitted_vertices_vec,
2269 bld->max_output_vertices_vec);
2270
2271 return LLVMBuildAnd(builder, current_mask_vec, max_mask, "");
2272 }
2273
2274 static void
2275 emit_vertex(
2276 const struct lp_build_tgsi_action * action,
2277 struct lp_build_tgsi_context * bld_base,
2278 struct lp_build_emit_data * emit_data)
2279 {
2280 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2281 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2282
2283 if (bld->gs_iface->emit_vertex) {
2284 LLVMValueRef masked_ones = mask_to_one_vec(bld_base);
2285 LLVMValueRef total_emitted_vertices_vec =
2286 LLVMBuildLoad(builder, bld->total_emitted_vertices_vec_ptr, "");
2287 masked_ones = clamp_mask_to_max_output_vertices(bld, masked_ones,
2288 total_emitted_vertices_vec);
2289 gather_outputs(bld);
2290 bld->gs_iface->emit_vertex(bld->gs_iface, &bld->bld_base,
2291 bld->outputs,
2292 total_emitted_vertices_vec);
2293 increment_vec_ptr_by_mask(bld_base, bld->emitted_vertices_vec_ptr,
2294 masked_ones);
2295 increment_vec_ptr_by_mask(bld_base, bld->total_emitted_vertices_vec_ptr,
2296 masked_ones);
2297 #if DUMP_GS_EMITS
2298 lp_build_print_value(bld->bld_base.base.gallivm,
2299 " +++ emit vertex masked ones = ",
2300 masked_ones);
2301 lp_build_print_value(bld->bld_base.base.gallivm,
2302 " +++ emit vertex emitted = ",
2303 total_emitted_vertices_vec);
2304 #endif
2305 }
2306 }
2307
2308
2309 static void
2310 end_primitive_masked(struct lp_build_tgsi_context * bld_base,
2311 LLVMValueRef masked_ones)
2312 {
2313 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2314 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2315
2316 if (bld->gs_iface->end_primitive) {
2317 LLVMValueRef emitted_vertices_vec =
2318 LLVMBuildLoad(builder, bld->emitted_vertices_vec_ptr, "");
2319 LLVMValueRef emitted_prims_vec =
2320 LLVMBuildLoad(builder, bld->emitted_prims_vec_ptr, "");
2321
2322 bld->gs_iface->end_primitive(bld->gs_iface, &bld->bld_base,
2323 emitted_vertices_vec,
2324 emitted_prims_vec);
2325
2326 #if DUMP_GS_EMITS
2327 lp_build_print_value(bld->bld_base.base.gallivm,
2328 " +++ end prim masked ones = ",
2329 masked_ones);
2330 lp_build_print_value(bld->bld_base.base.gallivm,
2331 " +++ end prim emitted verts1 = ",
2332 emitted_vertices_vec);
2333 lp_build_print_value(bld->bld_base.base.gallivm,
2334 " +++ end prim emitted prims1 = ",
2335 LLVMBuildLoad(builder,
2336 bld->emitted_prims_vec_ptr, ""));
2337 #endif
2338 increment_vec_ptr_by_mask(bld_base, bld->emitted_prims_vec_ptr,
2339 masked_ones);
2340 clear_uint_vec_ptr_from_mask(bld_base, bld->emitted_vertices_vec_ptr,
2341 masked_ones);
2342 #if DUMP_GS_EMITS
2343 lp_build_print_value(bld->bld_base.base.gallivm,
2344 " +++ end prim emitted verts2 = ",
2345 LLVMBuildLoad(builder,
2346 bld->emitted_vertices_vec_ptr, ""));
2347 #endif
2348 }
2349
2350 }
2351
2352 static void
2353 end_primitive(
2354 const struct lp_build_tgsi_action * action,
2355 struct lp_build_tgsi_context * bld_base,
2356 struct lp_build_emit_data * emit_data)
2357 {
2358 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2359
2360 if (bld->gs_iface->end_primitive) {
2361 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
2362 LLVMValueRef masked_ones = mask_to_one_vec(bld_base);
2363 struct lp_build_context *uint_bld = &bld_base->uint_bld;
2364 LLVMValueRef emitted_verts = LLVMBuildLoad(
2365 builder, bld->emitted_vertices_vec_ptr, "");
2366 LLVMValueRef emitted_mask = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
2367 emitted_verts,
2368 uint_bld->zero);
2369 /* We need to combine the current execution mask with the mask
2370 telling us which, if any, execution slots actually have
2371 unemitted primitives, this way we make sure that end_primitives
2372 executes only on the paths that have unflushed vertices */
2373 masked_ones = LLVMBuildAnd(builder, masked_ones, emitted_mask, "");
2374
2375 end_primitive_masked(bld_base, masked_ones);
2376 }
2377 }
2378
2379 static void
2380 cal_emit(
2381 const struct lp_build_tgsi_action * action,
2382 struct lp_build_tgsi_context * bld_base,
2383 struct lp_build_emit_data * emit_data)
2384 {
2385 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2386
2387 lp_exec_mask_call(&bld->exec_mask, emit_data->inst->Label.Label,
2388 &bld_base->pc);
2389 }
2390
2391 static void
2392 ret_emit(
2393 const struct lp_build_tgsi_action * action,
2394 struct lp_build_tgsi_context * bld_base,
2395 struct lp_build_emit_data * emit_data)
2396 {
2397 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2398
2399 lp_exec_mask_ret(&bld->exec_mask, &bld_base->pc);
2400 }
2401
2402 static void
2403 brk_emit(
2404 const struct lp_build_tgsi_action * action,
2405 struct lp_build_tgsi_context * bld_base,
2406 struct lp_build_emit_data * emit_data)
2407 {
2408 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2409
2410 lp_exec_break(&bld->exec_mask);
2411 }
2412
2413 static void
2414 breakc_emit(
2415 const struct lp_build_tgsi_action * action,
2416 struct lp_build_tgsi_context * bld_base,
2417 struct lp_build_emit_data * emit_data)
2418 {
2419 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2420 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
2421 struct lp_build_context *uint_bld = &bld_base->uint_bld;
2422 LLVMValueRef unsigned_cond =
2423 LLVMBuildBitCast(builder, emit_data->args[0], uint_bld->vec_type, "");
2424 LLVMValueRef cond = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
2425 unsigned_cond,
2426 uint_bld->zero);
2427
2428 lp_exec_break_condition(&bld->exec_mask, cond);
2429 }
2430
2431 static void
2432 if_emit(
2433 const struct lp_build_tgsi_action * action,
2434 struct lp_build_tgsi_context * bld_base,
2435 struct lp_build_emit_data * emit_data)
2436 {
2437 LLVMValueRef tmp;
2438 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2439
2440 tmp = lp_build_cmp(&bld_base->base, PIPE_FUNC_NOTEQUAL,
2441 emit_data->args[0], bld->bld_base.base.zero);
2442 lp_exec_mask_cond_push(&bld->exec_mask, tmp);
2443 }
2444
2445 static void
2446 uif_emit(
2447 const struct lp_build_tgsi_action * action,
2448 struct lp_build_tgsi_context * bld_base,
2449 struct lp_build_emit_data * emit_data)
2450 {
2451 LLVMValueRef tmp;
2452 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2453 struct lp_build_context *uint_bld = &bld_base->uint_bld;
2454
2455 tmp = lp_build_cmp(uint_bld, PIPE_FUNC_NOTEQUAL,
2456 emit_data->args[0], uint_bld->zero);
2457 lp_exec_mask_cond_push(&bld->exec_mask, tmp);
2458 }
2459
2460 static void
2461 bgnloop_emit(
2462 const struct lp_build_tgsi_action * action,
2463 struct lp_build_tgsi_context * bld_base,
2464 struct lp_build_emit_data * emit_data)
2465 {
2466 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2467
2468 lp_exec_bgnloop(&bld->exec_mask);
2469 }
2470
2471 static void
2472 bgnsub_emit(
2473 const struct lp_build_tgsi_action * action,
2474 struct lp_build_tgsi_context * bld_base,
2475 struct lp_build_emit_data * emit_data)
2476 {
2477 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2478
2479 lp_exec_mask_bgnsub(&bld->exec_mask);
2480 }
2481
2482 static void
2483 else_emit(
2484 const struct lp_build_tgsi_action * action,
2485 struct lp_build_tgsi_context * bld_base,
2486 struct lp_build_emit_data * emit_data)
2487 {
2488 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2489
2490 lp_exec_mask_cond_invert(&bld->exec_mask);
2491 }
2492
2493 static void
2494 endif_emit(
2495 const struct lp_build_tgsi_action * action,
2496 struct lp_build_tgsi_context * bld_base,
2497 struct lp_build_emit_data * emit_data)
2498 {
2499 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2500
2501 lp_exec_mask_cond_pop(&bld->exec_mask);
2502 }
2503
2504 static void
2505 endloop_emit(
2506 const struct lp_build_tgsi_action * action,
2507 struct lp_build_tgsi_context * bld_base,
2508 struct lp_build_emit_data * emit_data)
2509 {
2510 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2511
2512 lp_exec_endloop(bld_base->base.gallivm, &bld->exec_mask);
2513 }
2514
2515 static void
2516 endsub_emit(
2517 const struct lp_build_tgsi_action * action,
2518 struct lp_build_tgsi_context * bld_base,
2519 struct lp_build_emit_data * emit_data)
2520 {
2521 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2522
2523 lp_exec_mask_endsub(&bld->exec_mask, &bld_base->pc);
2524 }
2525
2526 static void
2527 cont_emit(
2528 const struct lp_build_tgsi_action * action,
2529 struct lp_build_tgsi_context * bld_base,
2530 struct lp_build_emit_data * emit_data)
2531 {
2532 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2533
2534 lp_exec_continue(&bld->exec_mask);
2535 }
2536
2537 /* XXX: Refactor and move it to lp_bld_tgsi_action.c
2538 *
2539 * XXX: What do the comments about xmm registers mean? Maybe they are left over
2540 * from old code, but there is no garauntee that LLVM will use those registers
2541 * for this code.
2542 *
2543 * XXX: There should be no calls to lp_build_emit_fetch in this function. This
2544 * should be handled by the emit_data->fetch_args function. */
2545 static void
2546 nrm_emit(
2547 const struct lp_build_tgsi_action * action,
2548 struct lp_build_tgsi_context * bld_base,
2549 struct lp_build_emit_data * emit_data)
2550 {
2551 LLVMValueRef tmp0, tmp1;
2552 LLVMValueRef tmp4 = NULL;
2553 LLVMValueRef tmp5 = NULL;
2554 LLVMValueRef tmp6 = NULL;
2555 LLVMValueRef tmp7 = NULL;
2556 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2557
2558 uint dims = (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
2559
2560 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X) ||
2561 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y) ||
2562 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z) ||
2563 (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W) && dims == 4)) {
2564
2565 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
2566
2567 /* xmm4 = src.x */
2568 /* xmm0 = src.x * src.x */
2569 tmp0 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_X);
2570 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X)) {
2571 tmp4 = tmp0;
2572 }
2573 tmp0 = lp_build_mul( &bld->bld_base.base, tmp0, tmp0);
2574
2575 /* xmm5 = src.y */
2576 /* xmm0 = xmm0 + src.y * src.y */
2577 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_Y);
2578 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y)) {
2579 tmp5 = tmp1;
2580 }
2581 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2582 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2583
2584 /* xmm6 = src.z */
2585 /* xmm0 = xmm0 + src.z * src.z */
2586 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_Z);
2587 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z)) {
2588 tmp6 = tmp1;
2589 }
2590 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2591 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2592
2593 if (dims == 4) {
2594 /* xmm7 = src.w */
2595 /* xmm0 = xmm0 + src.w * src.w */
2596 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_W);
2597 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W)) {
2598 tmp7 = tmp1;
2599 }
2600 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2601 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2602 }
2603 /* xmm1 = 1 / sqrt(xmm0) */
2604 tmp1 = lp_build_rsqrt( &bld->bld_base.base, tmp0);
2605 /* dst.x = xmm1 * src.x */
2606 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X)) {
2607 emit_data->output[TGSI_CHAN_X] = lp_build_mul( &bld->bld_base.base, tmp4, tmp1);
2608 }
2609 /* dst.y = xmm1 * src.y */
2610 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y)) {
2611 emit_data->output[TGSI_CHAN_Y] = lp_build_mul( &bld->bld_base.base, tmp5, tmp1);
2612 }
2613
2614 /* dst.z = xmm1 * src.z */
2615 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z)) {
2616 emit_data->output[TGSI_CHAN_Z] = lp_build_mul( &bld->bld_base.base, tmp6, tmp1);
2617 }
2618 /* dst.w = xmm1 * src.w */
2619 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X) && dims == 4) {
2620 emit_data->output[TGSI_CHAN_W] = lp_build_mul( &bld->bld_base.base, tmp7, tmp1);
2621 }
2622 }
2623
2624 /* dst.w = 1.0 */
2625 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W) && dims == 3) {
2626 emit_data->output[TGSI_CHAN_W] = bld->bld_base.base.one;
2627 }
2628 }
2629
2630 static void emit_prologue(struct lp_build_tgsi_context * bld_base)
2631 {
2632 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2633 struct gallivm_state * gallivm = bld_base->base.gallivm;
2634
2635 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
2636 LLVMValueRef array_size =
2637 lp_build_const_int32(gallivm,
2638 bld_base->info->file_max[TGSI_FILE_TEMPORARY] * 4 + 4);
2639 bld->temps_array = lp_build_array_alloca(gallivm,
2640 bld_base->base.vec_type, array_size,
2641 "temp_array");
2642 }
2643
2644 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2645 LLVMValueRef array_size =
2646 lp_build_const_int32(gallivm,
2647 bld_base->info->file_max[TGSI_FILE_OUTPUT] * 4 + 4);
2648 bld->outputs_array = lp_build_array_alloca(gallivm,
2649 bld_base->base.vec_type, array_size,
2650 "output_array");
2651 }
2652
2653 /* If we have indirect addressing in inputs we need to copy them into
2654 * our alloca array to be able to iterate over them */
2655 if (bld->indirect_files & (1 << TGSI_FILE_INPUT) && !bld->gs_iface) {
2656 unsigned index, chan;
2657 LLVMTypeRef vec_type = bld_base->base.vec_type;
2658 LLVMValueRef array_size = lp_build_const_int32(gallivm,
2659 bld_base->info->file_max[TGSI_FILE_INPUT]*4 + 4);
2660 bld->inputs_array = lp_build_array_alloca(gallivm,
2661 vec_type, array_size,
2662 "input_array");
2663
2664 assert(bld_base->info->num_inputs
2665 <= bld_base->info->file_max[TGSI_FILE_INPUT] + 1);
2666
2667 for (index = 0; index < bld_base->info->num_inputs; ++index) {
2668 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
2669 LLVMValueRef lindex =
2670 lp_build_const_int32(gallivm, index * 4 + chan);
2671 LLVMValueRef input_ptr =
2672 LLVMBuildGEP(gallivm->builder, bld->inputs_array,
2673 &lindex, 1, "");
2674 LLVMValueRef value = bld->inputs[index][chan];
2675 if (value)
2676 LLVMBuildStore(gallivm->builder, value, input_ptr);
2677 }
2678 }
2679 }
2680
2681 if (bld->gs_iface) {
2682 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
2683 bld->emitted_prims_vec_ptr =
2684 lp_build_alloca(gallivm,
2685 uint_bld->vec_type,
2686 "emitted_prims_ptr");
2687 bld->emitted_vertices_vec_ptr =
2688 lp_build_alloca(gallivm,
2689 uint_bld->vec_type,
2690 "emitted_vertices_ptr");
2691 bld->total_emitted_vertices_vec_ptr =
2692 lp_build_alloca(gallivm,
2693 uint_bld->vec_type,
2694 "total_emitted_vertices_ptr");
2695
2696 LLVMBuildStore(gallivm->builder, uint_bld->zero,
2697 bld->emitted_prims_vec_ptr);
2698 LLVMBuildStore(gallivm->builder, uint_bld->zero,
2699 bld->emitted_vertices_vec_ptr);
2700 LLVMBuildStore(gallivm->builder, uint_bld->zero,
2701 bld->total_emitted_vertices_vec_ptr);
2702 }
2703 }
2704
2705 static void emit_epilogue(struct lp_build_tgsi_context * bld_base)
2706 {
2707 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2708 LLVMBuilderRef builder = bld_base->base.gallivm->builder;
2709
2710 if (0) {
2711 /* for debugging */
2712 emit_dump_temps(bld);
2713 }
2714
2715 /* If we have indirect addressing in outputs we need to copy our alloca array
2716 * to the outputs slots specified by the caller */
2717 if (bld->gs_iface) {
2718 LLVMValueRef total_emitted_vertices_vec;
2719 LLVMValueRef emitted_prims_vec;
2720 /* implicit end_primitives, needed in case there are any unflushed
2721 vertices in the cache */
2722 end_primitive(NULL, bld_base, NULL);
2723
2724 total_emitted_vertices_vec =
2725 LLVMBuildLoad(builder, bld->total_emitted_vertices_vec_ptr, "");
2726 emitted_prims_vec =
2727 LLVMBuildLoad(builder, bld->emitted_prims_vec_ptr, "");
2728
2729 bld->gs_iface->gs_epilogue(bld->gs_iface,
2730 &bld->bld_base,
2731 total_emitted_vertices_vec,
2732 emitted_prims_vec);
2733 } else {
2734 gather_outputs(bld);
2735 }
2736 }
2737
2738 void
2739 lp_build_tgsi_soa(struct gallivm_state *gallivm,
2740 const struct tgsi_token *tokens,
2741 struct lp_type type,
2742 struct lp_build_mask_context *mask,
2743 LLVMValueRef consts_ptr,
2744 const struct lp_bld_tgsi_system_values *system_values,
2745 const LLVMValueRef (*inputs)[TGSI_NUM_CHANNELS],
2746 LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS],
2747 struct lp_build_sampler_soa *sampler,
2748 const struct tgsi_shader_info *info,
2749 const struct lp_build_tgsi_gs_iface *gs_iface)
2750 {
2751 struct lp_build_tgsi_soa_context bld;
2752
2753 struct lp_type res_type;
2754
2755 assert(type.length <= LP_MAX_VECTOR_LENGTH);
2756 memset(&res_type, 0, sizeof res_type);
2757 res_type.width = type.width;
2758 res_type.length = type.length;
2759 res_type.sign = 1;
2760
2761 /* Setup build context */
2762 memset(&bld, 0, sizeof bld);
2763 lp_build_context_init(&bld.bld_base.base, gallivm, type);
2764 lp_build_context_init(&bld.bld_base.uint_bld, gallivm, lp_uint_type(type));
2765 lp_build_context_init(&bld.bld_base.int_bld, gallivm, lp_int_type(type));
2766 lp_build_context_init(&bld.elem_bld, gallivm, lp_elem_type(type));
2767 bld.mask = mask;
2768 bld.inputs = inputs;
2769 bld.outputs = outputs;
2770 bld.consts_ptr = consts_ptr;
2771 bld.sampler = sampler;
2772 bld.bld_base.info = info;
2773 bld.indirect_files = info->indirect_files;
2774
2775 bld.bld_base.soa = TRUE;
2776 bld.bld_base.emit_fetch_funcs[TGSI_FILE_CONSTANT] = emit_fetch_constant;
2777 bld.bld_base.emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = emit_fetch_immediate;
2778 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_input;
2779 bld.bld_base.emit_fetch_funcs[TGSI_FILE_TEMPORARY] = emit_fetch_temporary;
2780 bld.bld_base.emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = emit_fetch_system_value;
2781 bld.bld_base.emit_store = emit_store;
2782
2783 bld.bld_base.emit_declaration = lp_emit_declaration_soa;
2784 bld.bld_base.emit_immediate = lp_emit_immediate_soa;
2785
2786 bld.bld_base.emit_prologue = emit_prologue;
2787 bld.bld_base.emit_epilogue = emit_epilogue;
2788
2789 /* Set opcode actions */
2790 lp_set_default_actions_cpu(&bld.bld_base);
2791
2792 bld.bld_base.op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
2793 bld.bld_base.op_actions[TGSI_OPCODE_BGNSUB].emit = bgnsub_emit;
2794 bld.bld_base.op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
2795 bld.bld_base.op_actions[TGSI_OPCODE_BREAKC].emit = breakc_emit;
2796 bld.bld_base.op_actions[TGSI_OPCODE_CAL].emit = cal_emit;
2797 bld.bld_base.op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
2798 bld.bld_base.op_actions[TGSI_OPCODE_DDX].emit = ddx_emit;
2799 bld.bld_base.op_actions[TGSI_OPCODE_DDY].emit = ddy_emit;
2800 bld.bld_base.op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
2801 bld.bld_base.op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
2802 bld.bld_base.op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
2803 bld.bld_base.op_actions[TGSI_OPCODE_ENDSUB].emit = endsub_emit;
2804 bld.bld_base.op_actions[TGSI_OPCODE_IF].emit = if_emit;
2805 bld.bld_base.op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
2806 bld.bld_base.op_actions[TGSI_OPCODE_KIL].emit = kil_emit;
2807 bld.bld_base.op_actions[TGSI_OPCODE_KILP].emit = kilp_emit;
2808 bld.bld_base.op_actions[TGSI_OPCODE_NRM].emit = nrm_emit;
2809 bld.bld_base.op_actions[TGSI_OPCODE_NRM4].emit = nrm_emit;
2810 bld.bld_base.op_actions[TGSI_OPCODE_RET].emit = ret_emit;
2811 bld.bld_base.op_actions[TGSI_OPCODE_TEX].emit = tex_emit;
2812 bld.bld_base.op_actions[TGSI_OPCODE_TXB].emit = txb_emit;
2813 bld.bld_base.op_actions[TGSI_OPCODE_TXD].emit = txd_emit;
2814 bld.bld_base.op_actions[TGSI_OPCODE_TXL].emit = txl_emit;
2815 bld.bld_base.op_actions[TGSI_OPCODE_TXP].emit = txp_emit;
2816 bld.bld_base.op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
2817 bld.bld_base.op_actions[TGSI_OPCODE_TXF].emit = txf_emit;
2818 /* DX10 sampling ops */
2819 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE].emit = sample_emit;
2820 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_B].emit = sample_b_emit;
2821 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C].emit = sample_c_emit;
2822 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C_LZ].emit = sample_c_lz_emit;
2823 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_D].emit = sample_d_emit;
2824 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_I].emit = sample_i_emit;
2825 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_L].emit = sample_l_emit;
2826 bld.bld_base.op_actions[TGSI_OPCODE_SVIEWINFO].emit = sviewinfo_emit;
2827
2828 if (gs_iface) {
2829 /* There's no specific value for this because it should always
2830 * be set, but apps using ext_geometry_shader4 quite often
2831 * were forgetting so we're using MAX_VERTEX_VARYING from
2832 * that spec even though we could debug_assert if it's not
2833 * set, but that's a lot uglier. */
2834 uint max_output_vertices = 32;
2835 uint i = 0;
2836 /* inputs are always indirect with gs */
2837 bld.indirect_files |= (1 << TGSI_FILE_INPUT);
2838 bld.gs_iface = gs_iface;
2839 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_gs_input;
2840 bld.bld_base.op_actions[TGSI_OPCODE_EMIT].emit = emit_vertex;
2841 bld.bld_base.op_actions[TGSI_OPCODE_ENDPRIM].emit = end_primitive;
2842
2843 for (i = 0; i < info->num_properties; ++i) {
2844 if (info->properties[i].name ==
2845 TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES) {
2846 max_output_vertices = info->properties[i].data[0];
2847 }
2848 }
2849 bld.max_output_vertices_vec =
2850 lp_build_const_int_vec(gallivm, bld.bld_base.uint_bld.type,
2851 max_output_vertices);
2852 }
2853
2854 lp_exec_mask_init(&bld.exec_mask, &bld.bld_base.int_bld);
2855
2856 bld.system_values = *system_values;
2857
2858 lp_build_tgsi_llvm(&bld.bld_base, tokens);
2859
2860 if (0) {
2861 LLVMBasicBlockRef block = LLVMGetInsertBlock(gallivm->builder);
2862 LLVMValueRef function = LLVMGetBasicBlockParent(block);
2863 debug_printf("11111111111111111111111111111 \n");
2864 tgsi_dump(tokens, 0);
2865 lp_debug_dump_value(function);
2866 debug_printf("2222222222222222222222222222 \n");
2867 }
2868
2869 if (0) {
2870 LLVMModuleRef module = LLVMGetGlobalParent(
2871 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm->builder)));
2872 LLVMDumpModule(module);
2873
2874 }
2875 }