1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_scan.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_bitarit.h"
53 #include "lp_bld_gather.h"
54 #include "lp_bld_init.h"
55 #include "lp_bld_logic.h"
56 #include "lp_bld_swizzle.h"
57 #include "lp_bld_flow.h"
58 #include "lp_bld_quad.h"
59 #include "lp_bld_tgsi.h"
60 #include "lp_bld_limits.h"
61 #include "lp_bld_debug.h"
62 #include "lp_bld_printf.h"
63
64
65 #define FOR_EACH_CHANNEL( CHAN )\
66 for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
67
68 #define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
69 ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))
70
71 #define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
72 if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
73
74 #define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
75 FOR_EACH_CHANNEL( CHAN )\
76 IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
77
78 #define CHAN_X 0
79 #define CHAN_Y 1
80 #define CHAN_Z 2
81 #define CHAN_W 3
82 #define NUM_CHANNELS 4
83
84 #define LP_MAX_INSTRUCTIONS 256
85
86
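/*
 * Execution-mask bookkeeping for SoA control flow.  Each mask below is an
 * integer vector with one lane per pixel/vertex: ~0 means the lane is
 * active, 0 means it is disabled.  lp_exec_mask_update() combines the
 * conditional, loop (continue/break) and call (return) masks into
 * exec_mask, which predicates all register stores.
 */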
87 struct lp_exec_mask {
88 struct lp_build_context *bld;
89
90 boolean has_mask;
91
92 LLVMTypeRef int_vec_type;
93
94 LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
95 int cond_stack_size;
96 LLVMValueRef cond_mask;
97
98 LLVMBasicBlockRef loop_block;
99 LLVMValueRef cont_mask;
100 LLVMValueRef break_mask;
101 LLVMValueRef break_var;
102 struct {
103 LLVMBasicBlockRef loop_block;
104 LLVMValueRef cont_mask;
105 LLVMValueRef break_mask;
106 LLVMValueRef break_var;
107 } loop_stack[LP_MAX_TGSI_NESTING];
108 int loop_stack_size;
109
110 LLVMValueRef ret_mask;
111 struct {
112 int pc;
113 LLVMValueRef ret_mask;
114 } call_stack[LP_MAX_TGSI_NESTING];
115 int call_stack_size;
116
117 LLVMValueRef exec_mask;
118 };
119
120 struct lp_build_tgsi_soa_context
121 {
122 struct lp_build_context base;
123
124 /* Builder for vector integer masks and indices */
125 struct lp_build_context uint_bld;
126
127 /* Builder for scalar elements of shader's data type (float) */
128 struct lp_build_context elem_bld;
129
130 LLVMValueRef consts_ptr;
131 const LLVMValueRef *pos;
132 const LLVMValueRef (*inputs)[NUM_CHANNELS];
133 LLVMValueRef (*outputs)[NUM_CHANNELS];
134
135 const struct lp_build_sampler_soa *sampler;
136
137 LLVMValueRef immediates[LP_MAX_TGSI_IMMEDIATES][NUM_CHANNELS];
138 LLVMValueRef temps[LP_MAX_TGSI_TEMPS][NUM_CHANNELS];
139 LLVMValueRef addr[LP_MAX_TGSI_ADDRS][NUM_CHANNELS];
140 LLVMValueRef preds[LP_MAX_TGSI_PREDS][NUM_CHANNELS];
141
142 /* We allocate/use this array of temps if (1 << TGSI_FILE_TEMPORARY) is
143 * set in the indirect_files field.
144 * The temps[] array above is unused then.
145 */
146 LLVMValueRef temps_array;
147
148 /* We allocate/use this array of outputs if (1 << TGSI_FILE_OUTPUT) is
149 * set in the indirect_files field.
150 * The outputs[] array above is unused then.
151 */
152 LLVMValueRef outputs_array;
153
154 /* We allocate/use this array of inputs if (1 << TGSI_FILE_INPUT) is
155 * set in the indirect_files field.
156 * The inputs[] array above is unused then.
157 */
158 LLVMValueRef inputs_array;
159
160 LLVMValueRef system_values_array;
161
162 const struct tgsi_shader_info *info;
163 /** bitmask indicating which register files are accessed indirectly */
164 unsigned indirect_files;
165
166 struct lp_build_mask_context *mask;
167 struct lp_exec_mask exec_mask;
168
169 struct tgsi_full_instruction *instructions;
170 uint max_instructions;
171 };
172
173 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
174 {
175 mask->bld = bld;
176 mask->has_mask = FALSE;
177 mask->cond_stack_size = 0;
178 mask->loop_stack_size = 0;
179 mask->call_stack_size = 0;
180
181 mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
182 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
183 LLVMConstAllOnes(mask->int_vec_type);
184 }
185
186 static void lp_exec_mask_update(struct lp_exec_mask *mask)
187 {
188 LLVMBuilderRef builder = mask->bld->gallivm->builder;
189
190 if (mask->loop_stack_size) {
191 /* for loops we need to update the entire mask at runtime */
192 LLVMValueRef tmp;
193 assert(mask->break_mask);
194 tmp = LLVMBuildAnd(builder,
195 mask->cont_mask,
196 mask->break_mask,
197 "maskcb");
198 mask->exec_mask = LLVMBuildAnd(builder,
199 mask->cond_mask,
200 tmp,
201 "maskfull");
202 } else
203 mask->exec_mask = mask->cond_mask;
204
205 if (mask->call_stack_size) {
206 mask->exec_mask = LLVMBuildAnd(builder,
207 mask->exec_mask,
208 mask->ret_mask,
209 "callmask");
210 }
211
212 mask->has_mask = (mask->cond_stack_size > 0 ||
213 mask->loop_stack_size > 0 ||
214 mask->call_stack_size > 0);
215 }
216
217 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
218 LLVMValueRef val)
219 {
220 LLVMBuilderRef builder = mask->bld->gallivm->builder;
221
222 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
223 if (mask->cond_stack_size == 0) {
224 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
225 }
226 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
227 assert(LLVMTypeOf(val) == mask->int_vec_type);
228 mask->cond_mask = LLVMBuildAnd(builder,
229 mask->cond_mask,
230 val,
231 "");
232 lp_exec_mask_update(mask);
233 }
234
235 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
236 {
237 LLVMBuilderRef builder = mask->bld->gallivm->builder;
238 LLVMValueRef prev_mask;
239 LLVMValueRef inv_mask;
240
241 assert(mask->cond_stack_size);
242 prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
243 if (mask->cond_stack_size == 1) {
244 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
245 }
246
247 inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
248
249 mask->cond_mask = LLVMBuildAnd(builder,
250 inv_mask,
251 prev_mask, "");
252 lp_exec_mask_update(mask);
253 }
254
255 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
256 {
257 assert(mask->cond_stack_size);
258 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
259 lp_exec_mask_update(mask);
260 }
261
262 static void lp_exec_bgnloop(struct lp_exec_mask *mask)
263 {
264 LLVMBuilderRef builder = mask->bld->gallivm->builder;
265
266 if (mask->loop_stack_size == 0) {
267 assert(mask->loop_block == NULL);
268 assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
269 assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
270 assert(mask->break_var == NULL);
271 }
272
273 assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);
274
275 mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
276 mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
277 mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
278 mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
279 ++mask->loop_stack_size;
280
281 mask->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
282 LLVMBuildStore(builder, mask->break_mask, mask->break_var);
283
284 mask->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");
285 LLVMBuildBr(builder, mask->loop_block);
286 LLVMPositionBuilderAtEnd(builder, mask->loop_block);
287
288 mask->break_mask = LLVMBuildLoad(builder, mask->break_var, "");
289
290 lp_exec_mask_update(mask);
291 }
292
293 static void lp_exec_break(struct lp_exec_mask *mask)
294 {
295 LLVMBuilderRef builder = mask->bld->gallivm->builder;
296 LLVMValueRef exec_mask = LLVMBuildNot(builder,
297 mask->exec_mask,
298 "break");
299
300 mask->break_mask = LLVMBuildAnd(builder,
301 mask->break_mask,
302 exec_mask, "break_full");
303
304 lp_exec_mask_update(mask);
305 }
306
307 static void lp_exec_continue(struct lp_exec_mask *mask)
308 {
309 LLVMBuilderRef builder = mask->bld->gallivm->builder;
310 LLVMValueRef exec_mask = LLVMBuildNot(builder,
311 mask->exec_mask,
312 "");
313
314 mask->cont_mask = LLVMBuildAnd(builder,
315 mask->cont_mask,
316 exec_mask, "");
317
318 lp_exec_mask_update(mask);
319 }
320
321
322 static void lp_exec_endloop(struct gallivm_state *gallivm,
323 struct lp_exec_mask *mask)
324 {
325 LLVMBuilderRef builder = mask->bld->gallivm->builder;
326 LLVMBasicBlockRef endloop;
327 LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
328 mask->bld->type.width *
329 mask->bld->type.length);
330 LLVMValueRef i1cond;
331
332 assert(mask->break_mask);
333
334 /*
335 * Restore the cont_mask, but don't pop
336 */
337 assert(mask->loop_stack_size);
338 mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
339 lp_exec_mask_update(mask);
340
341 /*
342 * Unlike the continue mask, the break_mask must be preserved across loop
343 * iterations
344 */
345 LLVMBuildStore(builder, mask->break_mask, mask->break_var);
346
347 /* i1cond = (exec_mask != 0), i.e. at least one lane is still active */
348 i1cond = LLVMBuildICmp(
349 builder,
350 LLVMIntNE,
351 LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
352 LLVMConstNull(reg_type), "");
353
354 endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");
355
356 LLVMBuildCondBr(builder,
357 i1cond, mask->loop_block, endloop);
358
359 LLVMPositionBuilderAtEnd(builder, endloop);
360
361 assert(mask->loop_stack_size);
362 --mask->loop_stack_size;
363 mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
364 mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
365 mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
366 mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;
367
368 lp_exec_mask_update(mask);
369 }
370
371 /* Store 'val' to the location pointed to by 'dst'.
372 * The store is predicated on 'pred' and mask->exec_mask: lanes whose
373 * combined mask is 0 keep the value already in memory, while lanes
374 * whose mask is ~0 (all bits set) receive 'val'.
375 */
376 static void lp_exec_mask_store(struct lp_exec_mask *mask,
377 LLVMValueRef pred,
378 LLVMValueRef val,
379 LLVMValueRef dst)
380 {
381 LLVMBuilderRef builder = mask->bld->gallivm->builder;
382
383 /* Mix the predicate and execution mask */
384 if (mask->has_mask) {
385 if (pred) {
386 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
387 } else {
388 pred = mask->exec_mask;
389 }
390 }
391
392 if (pred) {
393 LLVMValueRef real_val, dst_val;
394
395 dst_val = LLVMBuildLoad(builder, dst, "");
396 real_val = lp_build_select(mask->bld,
397 pred,
398 val, dst_val);
399
400 LLVMBuildStore(builder, real_val, dst);
401 } else
402 LLVMBuildStore(builder, val, dst);
403 }
404
405 static void lp_exec_mask_call(struct lp_exec_mask *mask,
406 int func,
407 int *pc)
408 {
409 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
410 mask->call_stack[mask->call_stack_size].pc = *pc;
411 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
412 mask->call_stack_size++;
413 *pc = func;
414 }
415
416 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
417 {
418 LLVMBuilderRef builder = mask->bld->gallivm->builder;
419 LLVMValueRef exec_mask;
420
421 if (mask->call_stack_size == 0) {
422 /* returning from main() */
423 *pc = -1;
424 return;
425 }
426 exec_mask = LLVMBuildNot(builder,
427 mask->exec_mask,
428 "ret");
429
430 mask->ret_mask = LLVMBuildAnd(builder,
431 mask->ret_mask,
432 exec_mask, "ret_full");
433
434 lp_exec_mask_update(mask);
435 }
436
437 static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
438 {
439 }
440
441 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
442 {
443 assert(mask->call_stack_size);
444 mask->call_stack_size--;
445 *pc = mask->call_stack[mask->call_stack_size].pc;
446 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
447 lp_exec_mask_update(mask);
448 }
449
450
451 /**
452 * Return pointer to a temporary register channel (src or dest).
453 * Note that indirect addressing cannot be handled here.
454 * \param index which temporary register
455 * \param chan which channel of the temp register.
456 */
457 static LLVMValueRef
458 get_temp_ptr(struct lp_build_tgsi_soa_context *bld,
459 unsigned index,
460 unsigned chan)
461 {
462 LLVMBuilderRef builder = bld->base.gallivm->builder;
463 assert(chan < 4);
464 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
465 LLVMValueRef lindex = lp_build_const_int32(bld->base.gallivm, index * 4 + chan);
466 return LLVMBuildGEP(builder, bld->temps_array, &lindex, 1, "");
467 }
468 else {
469 return bld->temps[index][chan];
470 }
471 }
472
473 /**
474 * Return pointer to an output register channel (src or dest).
475 * Note that indirect addressing cannot be handled here.
476 * \param index which output register
477 * \param chan which channel of the output register.
478 */
479 static LLVMValueRef
480 get_output_ptr(struct lp_build_tgsi_soa_context *bld,
481 unsigned index,
482 unsigned chan)
483 {
484 LLVMBuilderRef builder = bld->base.gallivm->builder;
485 assert(chan < 4);
486 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
487 LLVMValueRef lindex = lp_build_const_int32(bld->base.gallivm,
488 index * 4 + chan);
489 return LLVMBuildGEP(builder, bld->outputs_array, &lindex, 1, "");
490 }
491 else {
492 return bld->outputs[index][chan];
493 }
494 }
495
496 /**
497 * Gather vector.
498 * XXX the lp_build_gather() function should be capable of doing this
499 * with a little work.
500 */
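/*
 * In effect this emulates a gather: for each lane i of the SoA vector,
 * res[i] = base_ptr[indexes[i]], built out of scalar extract/load/insert
 * operations.
 */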
501 static LLVMValueRef
502 build_gather(struct lp_build_tgsi_soa_context *bld,
503 LLVMValueRef base_ptr,
504 LLVMValueRef indexes)
505 {
506 LLVMBuilderRef builder = bld->base.gallivm->builder;
507 LLVMValueRef res = bld->base.undef;
508 unsigned i;
509
510 /*
511 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
512 */
513 for (i = 0; i < bld->base.type.length; i++) {
514 LLVMValueRef ii = lp_build_const_int32(bld->base.gallivm, i);
515 LLVMValueRef index = LLVMBuildExtractElement(builder,
516 indexes, ii, "");
517 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr,
518 &index, 1, "gather_ptr");
519 LLVMValueRef scalar = LLVMBuildLoad(builder, scalar_ptr, "");
520
521 res = LLVMBuildInsertElement(builder, res, scalar, ii, "");
522 }
523
524 return res;
525 }
526
527
528 /**
529 * Scatter/store vector.
530 */
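/*
 * The inverse of build_gather: for each lane i, base_ptr[indexes[i]] is
 * overwritten with values[i], but only where the combined predicate and
 * execution mask is set; masked-off lanes keep their previous contents.
 */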
531 static void
532 emit_mask_scatter(struct lp_build_tgsi_soa_context *bld,
533 LLVMValueRef base_ptr,
534 LLVMValueRef indexes,
535 LLVMValueRef values,
536 struct lp_exec_mask *mask,
537 LLVMValueRef pred)
538 {
539 struct gallivm_state *gallivm = bld->base.gallivm;
540 LLVMBuilderRef builder = gallivm->builder;
541 unsigned i;
542
543 /* Mix the predicate and execution mask */
544 if (mask->has_mask) {
545 if (pred) {
546 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
547 }
548 else {
549 pred = mask->exec_mask;
550 }
551 }
552
553 /*
554 * Loop over elements of index_vec, store scalar value.
555 */
556 for (i = 0; i < bld->base.type.length; i++) {
557 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
558 LLVMValueRef index = LLVMBuildExtractElement(builder, indexes, ii, "");
559 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr, &index, 1, "scatter_ptr");
560 LLVMValueRef val = LLVMBuildExtractElement(builder, values, ii, "scatter_val");
561 LLVMValueRef scalar_pred = pred ?
562 LLVMBuildExtractElement(builder, pred, ii, "scatter_pred") : NULL;
563
564 if (0)
565 lp_build_printf(gallivm, "scatter %d: val %f at %d %p\n",
566 ii, val, index, scalar_ptr);
567
568 if (scalar_pred) {
569 LLVMValueRef real_val, dst_val;
570 dst_val = LLVMBuildLoad(builder, scalar_ptr, "");
571 real_val = lp_build_select(&bld->elem_bld, scalar_pred, val, dst_val);
572 LLVMBuildStore(builder, real_val, scalar_ptr);
573 }
574 else {
575 LLVMBuildStore(builder, val, scalar_ptr);
576 }
577 }
578 }
579
580
581 /**
582 * Read the current value of the ADDR register, convert the floats to
583 * ints, add the base index and return the vector of offsets.
584 * The offsets will be used to index into the constant buffer or
585 * temporary register file.
586 */
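/*
 * Roughly: index[i] = min(reg_index + (int) ADDR[ind->Index].x[i], file_max),
 * so out-of-bounds relative addressing is clamped to the declared size of
 * the register file.
 */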
587 static LLVMValueRef
588 get_indirect_index(struct lp_build_tgsi_soa_context *bld,
589 unsigned reg_file, unsigned reg_index,
590 const struct tgsi_src_register *indirect_reg)
591 {
592 LLVMBuilderRef builder = bld->base.gallivm->builder;
593 struct lp_build_context *uint_bld = &bld->uint_bld;
594 /* always use X component of address register */
595 unsigned swizzle = indirect_reg->SwizzleX;
596 LLVMValueRef base;
597 LLVMValueRef rel;
598 LLVMValueRef max_index;
599 LLVMValueRef index;
600
601 assert(bld->indirect_files & (1 << reg_file));
602
603 base = lp_build_const_int_vec(bld->base.gallivm, uint_bld->type, reg_index);
604
605 assert(swizzle < 4);
606 rel = LLVMBuildLoad(builder,
607 bld->addr[indirect_reg->Index][swizzle],
608 "load addr reg");
609
610 /* for indexing we want integers */
611 rel = LLVMBuildFPToSI(builder,
612 rel,
613 uint_bld->vec_type, "");
614
615 index = lp_build_add(uint_bld, base, rel);
616
617 max_index = lp_build_const_int_vec(bld->base.gallivm,
618 uint_bld->type,
619 bld->info->file_max[reg_file]);
620
621 assert(!uint_bld->type.sign);
622 index = lp_build_min(uint_bld, index, max_index);
623
624 return index;
625 }
626
627
628 /**
629 * Register fetch.
630 */
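/*
 * Returns one channel of one source operand as an SoA vector (the value of
 * that channel for every lane), handling the operand's swizzle, indirect
 * addressing and absolute/negate modifiers.
 */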
631 static LLVMValueRef
632 emit_fetch(
633 struct lp_build_tgsi_soa_context *bld,
634 const struct tgsi_full_instruction *inst,
635 unsigned src_op,
636 const unsigned chan_index )
637 {
638 struct gallivm_state *gallivm = bld->base.gallivm;
639 LLVMBuilderRef builder = gallivm->builder;
640 struct lp_build_context *uint_bld = &bld->uint_bld;
641 const struct tgsi_full_src_register *reg = &inst->Src[src_op];
642 const unsigned swizzle =
643 tgsi_util_get_full_src_register_swizzle(reg, chan_index);
644 LLVMValueRef res;
645 LLVMValueRef indirect_index = NULL;
646
647 if (swizzle > 3) {
648 assert(0 && "invalid swizzle in emit_fetch()");
649 return bld->base.undef;
650 }
651
652 if (reg->Register.Indirect) {
653 indirect_index = get_indirect_index(bld,
654 reg->Register.File,
655 reg->Register.Index,
656 &reg->Indirect);
657 } else {
658 assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
659 }
660
661 switch (reg->Register.File) {
662 case TGSI_FILE_CONSTANT:
663 if (reg->Register.Indirect) {
664 LLVMValueRef swizzle_vec =
665 lp_build_const_int_vec(bld->base.gallivm, uint_bld->type, swizzle);
666 LLVMValueRef index_vec; /* index into the const buffer */
667
668 /* index_vec = indirect_index * 4 + swizzle */
669 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
670 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
671
672 /* Gather values from the constant buffer */
673 res = build_gather(bld, bld->consts_ptr, index_vec);
674 }
675 else {
676 LLVMValueRef index; /* index into the const buffer */
677 LLVMValueRef scalar, scalar_ptr;
678
679 index = lp_build_const_int32(gallivm, reg->Register.Index*4 + swizzle);
680
681 scalar_ptr = LLVMBuildGEP(builder, bld->consts_ptr,
682 &index, 1, "");
683 scalar = LLVMBuildLoad(builder, scalar_ptr, "");
684
685 res = lp_build_broadcast_scalar(&bld->base, scalar);
686 }
687 break;
688
689 case TGSI_FILE_IMMEDIATE:
690 res = bld->immediates[reg->Register.Index][swizzle];
691 assert(res);
692 break;
693
694 case TGSI_FILE_INPUT:
695 if (reg->Register.Indirect) {
696 LLVMValueRef swizzle_vec =
697 lp_build_const_int_vec(gallivm, uint_bld->type, swizzle);
698 LLVMValueRef length_vec =
699 lp_build_const_int_vec(gallivm, uint_bld->type, bld->base.type.length);
700 LLVMValueRef index_vec; /* index into the input array */
701 LLVMValueRef inputs_array;
702 LLVMTypeRef float4_ptr_type;
703
704 /* index_vec = (indirect_index * 4 + swizzle) * length */
705 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
706 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
707 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
708
709 /* cast inputs_array pointer to float* */
710 float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
711 inputs_array = LLVMBuildBitCast(builder, bld->inputs_array,
712 float4_ptr_type, "");
713
714 /* Gather values from the input register array */
715 res = build_gather(bld, inputs_array, index_vec);
716 } else {
717 if (bld->indirect_files & (1 << TGSI_FILE_INPUT)) {
718 LLVMValueRef lindex = lp_build_const_int32(gallivm,
719 reg->Register.Index * 4 + swizzle);
720 LLVMValueRef input_ptr = LLVMBuildGEP(builder,
721 bld->inputs_array, &lindex, 1, "");
722 res = LLVMBuildLoad(builder, input_ptr, "");
723 }
724 else {
725 res = bld->inputs[reg->Register.Index][swizzle];
726 }
727 }
728 assert(res);
729 break;
730
731 case TGSI_FILE_TEMPORARY:
732 if (reg->Register.Indirect) {
733 LLVMValueRef swizzle_vec =
734 lp_build_const_int_vec(bld->base.gallivm, uint_bld->type, swizzle);
735 LLVMValueRef length_vec =
736 lp_build_const_int_vec(bld->base.gallivm, uint_bld->type,
737 bld->base.type.length);
738 LLVMValueRef index_vec; /* index into the temporary register array */
739 LLVMValueRef temps_array;
740 LLVMTypeRef float4_ptr_type;
741
742 /* index_vec = (indirect_index * 4 + swizzle) * length */
743 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
744 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
745 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
746
747 /* cast temps_array pointer to float* */
748 float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(bld->base.gallivm->context), 0);
749 temps_array = LLVMBuildBitCast(builder, bld->temps_array,
750 float4_ptr_type, "");
751
752 /* Gather values from the temporary register array */
753 res = build_gather(bld, temps_array, index_vec);
754 }
755 else {
756 LLVMValueRef temp_ptr;
757 temp_ptr = get_temp_ptr(bld, reg->Register.Index, swizzle);
758 res = LLVMBuildLoad(builder, temp_ptr, "");
759 if (!res)
760 return bld->base.undef;
761 }
762 break;
763
764 case TGSI_FILE_SYSTEM_VALUE:
765 assert(!reg->Register.Indirect);
766 {
767 LLVMValueRef index; /* index into the system value array */
768 LLVMValueRef scalar, scalar_ptr;
769
770 index = lp_build_const_int32(gallivm,
771 reg->Register.Index * 4 + swizzle);
772
773 scalar_ptr = LLVMBuildGEP(builder, bld->system_values_array,
774 &index, 1, "");
775 scalar = LLVMBuildLoad(builder, scalar_ptr, "");
776
777 res = lp_build_broadcast_scalar(&bld->base, scalar);
778 }
779 break;
780
781 default:
782 assert(0 && "invalid src register in emit_fetch()");
783 return bld->base.undef;
784 }
785
786 switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
787 case TGSI_UTIL_SIGN_CLEAR:
788 res = lp_build_abs( &bld->base, res );
789 break;
790
791 case TGSI_UTIL_SIGN_SET:
792 res = lp_build_abs( &bld->base, res );
793 /* fall through */
794 case TGSI_UTIL_SIGN_TOGGLE:
795 res = lp_build_negate( &bld->base, res );
796 break;
797
798 case TGSI_UTIL_SIGN_KEEP:
799 break;
800 }
801
802 return res;
803 }
804
805
806 /**
807 * Register fetch with derivatives.
808 */
809 static void
810 emit_fetch_deriv(
811 struct lp_build_tgsi_soa_context *bld,
812 const struct tgsi_full_instruction *inst,
813 unsigned index,
814 const unsigned chan_index,
815 LLVMValueRef *res,
816 LLVMValueRef *ddx,
817 LLVMValueRef *ddy)
818 {
819 LLVMValueRef src;
820
821 src = emit_fetch(bld, inst, index, chan_index);
822
823 if(res)
824 *res = src;
825
826 /* TODO: use interpolation coeffs for inputs */
827
828 if(ddx)
829 *ddx = lp_build_ddx(&bld->base, src);
830
831 if(ddy)
832 *ddy = lp_build_ddy(&bld->base, src);
833 }
834
835
836 /**
837 * Fetch the per-channel predicate masks for an instruction, if any.
838 */
839 static void
840 emit_fetch_predicate(
841 struct lp_build_tgsi_soa_context *bld,
842 const struct tgsi_full_instruction *inst,
843 LLVMValueRef *pred)
844 {
845 LLVMBuilderRef builder = bld->base.gallivm->builder;
846 unsigned index;
847 unsigned char swizzles[4];
848 LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
849 LLVMValueRef value;
850 unsigned chan;
851
852 if (!inst->Instruction.Predicate) {
853 FOR_EACH_CHANNEL( chan ) {
854 pred[chan] = NULL;
855 }
856 return;
857 }
858
859 swizzles[0] = inst->Predicate.SwizzleX;
860 swizzles[1] = inst->Predicate.SwizzleY;
861 swizzles[2] = inst->Predicate.SwizzleZ;
862 swizzles[3] = inst->Predicate.SwizzleW;
863
864 index = inst->Predicate.Index;
865 assert(index < LP_MAX_TGSI_PREDS);
866
867 FOR_EACH_CHANNEL( chan ) {
868 unsigned swizzle = swizzles[chan];
869
870 /*
871 * Only fetch the predicate register channels that are actually listed
872 * in the swizzles
873 */
874 if (!unswizzled[swizzle]) {
875 value = LLVMBuildLoad(builder,
876 bld->preds[index][swizzle], "");
877
878 /*
879 * Convert the value to an integer mask.
880 *
881 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
882 * is needlessly causing two comparisons due to storing the intermediate
883 * result as float vector instead of an integer mask vector.
884 */
885 value = lp_build_compare(bld->base.gallivm,
886 bld->base.type,
887 PIPE_FUNC_NOTEQUAL,
888 value,
889 bld->base.zero);
890 if (inst->Predicate.Negate) {
891 value = LLVMBuildNot(builder, value, "");
892 }
893
894 unswizzled[swizzle] = value;
895 } else {
896 value = unswizzled[swizzle];
897 }
898
899 pred[chan] = value;
900 }
901 }
902
903
904 /**
905 * Register store.
906 */
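/*
 * Stores one SoA vector into one channel of a destination register, after
 * applying the instruction's saturate modifier.  Writes go through
 * lp_exec_mask_store() (or emit_mask_scatter() for indirect destinations),
 * so lanes disabled by the execution mask or predicate are left untouched.
 */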
907 static void
908 emit_store(
909 struct lp_build_tgsi_soa_context *bld,
910 const struct tgsi_full_instruction *inst,
911 unsigned index,
912 unsigned chan_index,
913 LLVMValueRef pred,
914 LLVMValueRef value)
915 {
916 struct gallivm_state *gallivm = bld->base.gallivm;
917 LLVMBuilderRef builder = gallivm->builder;
918 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
919 struct lp_build_context *uint_bld = &bld->uint_bld;
920 LLVMValueRef indirect_index = NULL;
921
922 switch( inst->Instruction.Saturate ) {
923 case TGSI_SAT_NONE:
924 break;
925
926 case TGSI_SAT_ZERO_ONE:
927 value = lp_build_max(&bld->base, value, bld->base.zero);
928 value = lp_build_min(&bld->base, value, bld->base.one);
929 break;
930
931 case TGSI_SAT_MINUS_PLUS_ONE:
932 value = lp_build_max(&bld->base, value, lp_build_const_vec(bld->base.gallivm, bld->base.type, -1.0));
933 value = lp_build_min(&bld->base, value, bld->base.one);
934 break;
935
936 default:
937 assert(0);
938 }
939
940 if (reg->Register.Indirect) {
941 indirect_index = get_indirect_index(bld,
942 reg->Register.File,
943 reg->Register.Index,
944 &reg->Indirect);
945 } else {
946 assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
947 }
948
949 switch( reg->Register.File ) {
950 case TGSI_FILE_OUTPUT:
951 if (reg->Register.Indirect) {
952 LLVMValueRef chan_vec =
953 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
954 LLVMValueRef length_vec =
955 lp_build_const_int_vec(gallivm, uint_bld->type, bld->base.type.length);
956 LLVMValueRef index_vec; /* indexes into the output registers */
957 LLVMValueRef outputs_array;
958 LLVMValueRef pixel_offsets;
959 LLVMTypeRef float_ptr_type;
960 int i;
961
962 /* build pixel offset vector: {0, 1, 2, 3, ...} */
963 pixel_offsets = uint_bld->undef;
964 for (i = 0; i < bld->base.type.length; i++) {
965 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
966 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
967 ii, ii, "");
968 }
969
970 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
971 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
972 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
973 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
974 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
975
976 float_ptr_type =
977 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
978 outputs_array = LLVMBuildBitCast(builder, bld->outputs_array,
979 float_ptr_type, "");
980
981 /* Scatter store values into output registers */
982 emit_mask_scatter(bld, outputs_array, index_vec, value,
983 &bld->exec_mask, pred);
984 }
985 else {
986 LLVMValueRef out_ptr = get_output_ptr(bld, reg->Register.Index,
987 chan_index);
988 lp_exec_mask_store(&bld->exec_mask, pred, value, out_ptr);
989 }
990 break;
991
992 case TGSI_FILE_TEMPORARY:
993 if (reg->Register.Indirect) {
994 LLVMValueRef chan_vec =
995 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
996 LLVMValueRef length_vec =
997 lp_build_const_int_vec(gallivm, uint_bld->type,
998 bld->base.type.length);
999 LLVMValueRef index_vec; /* indexes into the temp registers */
1000 LLVMValueRef temps_array;
1001 LLVMValueRef pixel_offsets;
1002 LLVMTypeRef float_ptr_type;
1003 int i;
1004
1005 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1006 pixel_offsets = uint_bld->undef;
1007 for (i = 0; i < bld->base.type.length; i++) {
1008 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1009 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
1010 ii, ii, "");
1011 }
1012
1013 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1014 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1015 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1016 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1017 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1018
1019 float_ptr_type =
1020 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1021 temps_array = LLVMBuildBitCast(builder, bld->temps_array,
1022 float_ptr_type, "");
1023
1024 /* Scatter store values into temp registers */
1025 emit_mask_scatter(bld, temps_array, index_vec, value,
1026 &bld->exec_mask, pred);
1027 }
1028 else {
1029 LLVMValueRef temp_ptr = get_temp_ptr(bld, reg->Register.Index,
1030 chan_index);
1031 lp_exec_mask_store(&bld->exec_mask, pred, value, temp_ptr);
1032 }
1033 break;
1034
1035 case TGSI_FILE_ADDRESS:
1036 lp_exec_mask_store(&bld->exec_mask, pred, value,
1037 bld->addr[reg->Register.Index][chan_index]);
1038 break;
1039
1040 case TGSI_FILE_PREDICATE:
1041 lp_exec_mask_store(&bld->exec_mask, pred, value,
1042 bld->preds[reg->Register.Index][chan_index]);
1043 break;
1044
1045 default:
1046 assert( 0 );
1047 }
1048 }
1049
1050
1051 /**
1052 * High-level instruction translators.
1053 */
1054
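/*
 * Emit a texture sample: fetch the coordinates (applying projection or
 * LOD bias/explicit LOD as requested by 'modifier'), compute or fetch the
 * derivatives, and hand everything to the sampler code generator.
 */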
1055 static void
1056 emit_tex( struct lp_build_tgsi_soa_context *bld,
1057 const struct tgsi_full_instruction *inst,
1058 enum lp_build_tex_modifier modifier,
1059 LLVMValueRef *texel)
1060 {
1061 LLVMBuilderRef builder = bld->base.gallivm->builder;
1062 unsigned unit;
1063 LLVMValueRef lod_bias, explicit_lod;
1064 LLVMValueRef oow = NULL;
1065 LLVMValueRef coords[3];
1066 LLVMValueRef ddx[3];
1067 LLVMValueRef ddy[3];
1068 unsigned num_coords;
1069 unsigned i;
1070
1071 if (!bld->sampler) {
1072 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1073 for (i = 0; i < 4; i++) {
1074 texel[i] = bld->base.undef;
1075 }
1076 return;
1077 }
1078
1079 switch (inst->Texture.Texture) {
1080 case TGSI_TEXTURE_1D:
1081 num_coords = 1;
1082 break;
1083 case TGSI_TEXTURE_1D_ARRAY:
1084 case TGSI_TEXTURE_2D:
1085 case TGSI_TEXTURE_RECT:
1086 num_coords = 2;
1087 break;
1088 case TGSI_TEXTURE_SHADOW1D:
1089 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1090 case TGSI_TEXTURE_SHADOW2D:
1091 case TGSI_TEXTURE_SHADOWRECT:
1092 case TGSI_TEXTURE_2D_ARRAY:
1093 case TGSI_TEXTURE_3D:
1094 case TGSI_TEXTURE_CUBE:
1095 num_coords = 3;
1096 break;
1097 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1098 num_coords = 4;
1099 break;
1100 default:
1101 assert(0);
1102 return;
1103 }
1104
1105 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
1106 lod_bias = emit_fetch( bld, inst, 0, 3 );
1107 explicit_lod = NULL;
1108 }
1109 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
1110 lod_bias = NULL;
1111 explicit_lod = emit_fetch( bld, inst, 0, 3 );
1112 }
1113 else {
1114 lod_bias = NULL;
1115 explicit_lod = NULL;
1116 }
1117
1118 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
1119 oow = emit_fetch( bld, inst, 0, 3 );
1120 oow = lp_build_rcp(&bld->base, oow);
1121 }
1122
1123 for (i = 0; i < num_coords; i++) {
1124 coords[i] = emit_fetch( bld, inst, 0, i );
1125 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
1126 coords[i] = lp_build_mul(&bld->base, coords[i], oow);
1127 }
1128 for (i = num_coords; i < 3; i++) {
1129 coords[i] = bld->base.undef;
1130 }
1131
1132 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
1133 LLVMValueRef index0 = lp_build_const_int32(bld->base.gallivm, 0);
1134 for (i = 0; i < num_coords; i++) {
1135 LLVMValueRef src1 = emit_fetch( bld, inst, 1, i );
1136 LLVMValueRef src2 = emit_fetch( bld, inst, 2, i );
1137 ddx[i] = LLVMBuildExtractElement(builder, src1, index0, "");
1138 ddy[i] = LLVMBuildExtractElement(builder, src2, index0, "");
1139 }
1140 unit = inst->Src[3].Register.Index;
1141 } else {
1142 for (i = 0; i < num_coords; i++) {
1143 ddx[i] = lp_build_scalar_ddx( &bld->base, coords[i] );
1144 ddy[i] = lp_build_scalar_ddy( &bld->base, coords[i] );
1145 }
1146 unit = inst->Src[1].Register.Index;
1147 }
1148 for (i = num_coords; i < 3; i++) {
1149 ddx[i] = LLVMGetUndef(bld->base.elem_type);
1150 ddy[i] = LLVMGetUndef(bld->base.elem_type);
1151 }
1152
1153 bld->sampler->emit_fetch_texel(bld->sampler,
1154 bld->base.gallivm,
1155 bld->base.type,
1156 unit, num_coords, coords,
1157 ddx, ddy,
1158 lod_bias, explicit_lod,
1159 texel);
1160 }
1161
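/*
 * Heuristic used by the KIL/KILP paths: look at most five instructions
 * ahead; if nothing but simple, non-branching instructions remains before
 * TGSI_OPCODE_END, the extra mask check after the kill can be skipped.
 */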
1162 static boolean
1163 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
1164 int pc)
1165 {
1166 int i;
1167
1168 for (i = 0; i < 5; i++) {
1169 unsigned opcode;
1170
1171 if (pc + i >= bld->info->num_instructions)
1172 return TRUE;
1173
1174 opcode = bld->instructions[pc + i].Instruction.Opcode;
1175
1176 if (opcode == TGSI_OPCODE_END)
1177 return TRUE;
1178
1179 if (opcode == TGSI_OPCODE_TEX ||
1180 opcode == TGSI_OPCODE_TXP ||
1181 opcode == TGSI_OPCODE_TXD ||
1182 opcode == TGSI_OPCODE_TXB ||
1183 opcode == TGSI_OPCODE_TXL ||
1184 opcode == TGSI_OPCODE_TXF ||
1185 opcode == TGSI_OPCODE_TXQ ||
1186 opcode == TGSI_OPCODE_CAL ||
1187 opcode == TGSI_OPCODE_CALLNZ ||
1188 opcode == TGSI_OPCODE_IF ||
1189 opcode == TGSI_OPCODE_IFC ||
1190 opcode == TGSI_OPCODE_BGNLOOP ||
1191 opcode == TGSI_OPCODE_SWITCH)
1192 return FALSE;
1193 }
1194
1195 return TRUE;
1196 }
1197
1198
1199
1200 /**
1201 * Kill fragment if any of the src register values are negative.
1202 */
1203 static void
1204 emit_kil(
1205 struct lp_build_tgsi_soa_context *bld,
1206 const struct tgsi_full_instruction *inst,
1207 int pc)
1208 {
1209 LLVMBuilderRef builder = bld->base.gallivm->builder;
1210 const struct tgsi_full_src_register *reg = &inst->Src[0];
1211 LLVMValueRef terms[NUM_CHANNELS];
1212 LLVMValueRef mask;
1213 unsigned chan_index;
1214
1215 memset(&terms, 0, sizeof terms);
1216
1217 FOR_EACH_CHANNEL( chan_index ) {
1218 unsigned swizzle;
1219
1220 /* Unswizzle channel */
1221 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
1222
1223 /* Check if the component has not been already tested. */
1224 assert(swizzle < NUM_CHANNELS);
1225 if( !terms[swizzle] )
1226 /* TODO: change the comparison operator instead of setting the sign */
1227 terms[swizzle] = emit_fetch(bld, inst, 0, chan_index );
1228 }
1229
1230 mask = NULL;
1231 FOR_EACH_CHANNEL( chan_index ) {
1232 if(terms[chan_index]) {
1233 LLVMValueRef chan_mask;
1234
1235 /*
1236 * If term < 0 then mask = 0 else mask = ~0.
1237 */
1238 chan_mask = lp_build_cmp(&bld->base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->base.zero);
1239
1240 if(mask)
1241 mask = LLVMBuildAnd(builder, mask, chan_mask, "");
1242 else
1243 mask = chan_mask;
1244 }
1245 }
1246
1247 if(mask) {
1248 lp_build_mask_update(bld->mask, mask);
1249
1250 if (!near_end_of_shader(bld, pc))
1251 lp_build_mask_check(bld->mask);
1252 }
1253 }
1254
1255
1256 /**
1257 * Predicated fragment kill.
1258 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1259 * The only predication is the execution mask which will apply if
1260 * we're inside a loop or conditional.
1261 */
1262 static void
1263 emit_kilp(struct lp_build_tgsi_soa_context *bld,
1264 const struct tgsi_full_instruction *inst,
1265 int pc)
1266 {
1267 LLVMBuilderRef builder = bld->base.gallivm->builder;
1268 LLVMValueRef mask;
1269
1270 /* For those channels which are "alive", disable fragment shader
1271 * execution.
1272 */
1273 if (bld->exec_mask.has_mask) {
1274 mask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
1275 }
1276 else {
1277 LLVMValueRef zero = LLVMConstNull(bld->base.int_vec_type);
1278 mask = zero;
1279 }
1280
1281 lp_build_mask_update(bld->mask, mask);
1282
1283 if (!near_end_of_shader(bld, pc))
1284 lp_build_mask_check(bld->mask);
1285 }
1286
1287
1288 /**
1289 * Emit code which will dump the value of all the temporary registers
1290 * to stdout.
1291 */
1292 static void
1293 emit_dump_temps(struct lp_build_tgsi_soa_context *bld)
1294 {
1295 struct gallivm_state *gallivm = bld->base.gallivm;
1296 LLVMBuilderRef builder = gallivm->builder;
1297 LLVMValueRef temp_ptr;
1298 LLVMValueRef i0 = lp_build_const_int32(gallivm, 0);
1299 LLVMValueRef i1 = lp_build_const_int32(gallivm, 1);
1300 LLVMValueRef i2 = lp_build_const_int32(gallivm, 2);
1301 LLVMValueRef i3 = lp_build_const_int32(gallivm, 3);
1302 int index;
1303 int n = bld->info->file_max[TGSI_FILE_TEMPORARY];
1304
1305 for (index = 0; index < n; index++) {
1306 LLVMValueRef idx = lp_build_const_int32(gallivm, index);
1307 LLVMValueRef v[4][4], res;
1308 int chan;
1309
1310 lp_build_printf(gallivm, "TEMP[%d]:\n", idx);
1311
1312 for (chan = 0; chan < 4; chan++) {
1313 temp_ptr = get_temp_ptr(bld, index, chan);
1314 res = LLVMBuildLoad(builder, temp_ptr, "");
1315 v[chan][0] = LLVMBuildExtractElement(builder, res, i0, "");
1316 v[chan][1] = LLVMBuildExtractElement(builder, res, i1, "");
1317 v[chan][2] = LLVMBuildExtractElement(builder, res, i2, "");
1318 v[chan][3] = LLVMBuildExtractElement(builder, res, i3, "");
1319 }
1320
1321 lp_build_printf(gallivm, " X: %f %f %f %f\n",
1322 v[0][0], v[0][1], v[0][2], v[0][3]);
1323 lp_build_printf(gallivm, " Y: %f %f %f %f\n",
1324 v[1][0], v[1][1], v[1][2], v[1][3]);
1325 lp_build_printf(gallivm, " Z: %f %f %f %f\n",
1326 v[2][0], v[2][1], v[2][2], v[2][3]);
1327 lp_build_printf(gallivm, " W: %f %f %f %f\n",
1328 v[3][0], v[3][1], v[3][2], v[3][3]);
1329 }
1330 }
1331
1332
1333
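/*
 * Allocate storage for a TGSI declaration: one stack alloca per channel for
 * temporaries, outputs, address and predicate registers.  Register files
 * flagged in indirect_files use the flat *_array allocations instead and
 * are skipped here.
 */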
1334 static void
1335 emit_declaration(
1336 struct lp_build_tgsi_soa_context *bld,
1337 const struct tgsi_full_declaration *decl)
1338 {
1339 struct gallivm_state *gallivm = bld->base.gallivm;
1340 LLVMTypeRef vec_type = bld->base.vec_type;
1341 const unsigned first = decl->Range.First;
1342 const unsigned last = decl->Range.Last;
1343 unsigned idx, i;
1344
1345 for (idx = first; idx <= last; ++idx) {
1346 assert(last <= bld->info->file_max[decl->Declaration.File]);
1347 switch (decl->Declaration.File) {
1348 case TGSI_FILE_TEMPORARY:
1349 assert(idx < LP_MAX_TGSI_TEMPS);
1350 if (!(bld->indirect_files & (1 << TGSI_FILE_TEMPORARY))) {
1351 for (i = 0; i < NUM_CHANNELS; i++)
1352 bld->temps[idx][i] = lp_build_alloca(gallivm, vec_type, "temp");
1353 }
1354 break;
1355
1356 case TGSI_FILE_OUTPUT:
1357 if (!(bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
1358 for (i = 0; i < NUM_CHANNELS; i++)
1359 bld->outputs[idx][i] = lp_build_alloca(gallivm,
1360 vec_type, "output");
1361 }
1362 break;
1363
1364 case TGSI_FILE_ADDRESS:
1365 assert(idx < LP_MAX_TGSI_ADDRS);
1366 for (i = 0; i < NUM_CHANNELS; i++)
1367 bld->addr[idx][i] = lp_build_alloca(gallivm, vec_type, "addr");
1368 break;
1369
1370 case TGSI_FILE_PREDICATE:
1371 assert(idx < LP_MAX_TGSI_PREDS);
1372 for (i = 0; i < NUM_CHANNELS; i++)
1373 bld->preds[idx][i] = lp_build_alloca(gallivm, vec_type,
1374 "predicate");
1375 break;
1376
1377 default:
1378 /* don't need to declare other vars */
1379 break;
1380 }
1381 }
1382 }
1383
1384
1385 /**
1386 * Emit LLVM for one TGSI instruction.
1387 * \return TRUE for success, FALSE otherwise
1388 */
1389 static boolean
1390 emit_instruction(
1391 struct lp_build_tgsi_soa_context *bld,
1392 const struct tgsi_full_instruction *inst,
1393 const struct tgsi_opcode_info *info,
1394 int *pc)
1395 {
1396 unsigned chan_index;
1397 LLVMValueRef src0, src1, src2;
1398 LLVMValueRef tmp0, tmp1, tmp2;
1399 LLVMValueRef tmp3 = NULL;
1400 LLVMValueRef tmp4 = NULL;
1401 LLVMValueRef tmp5 = NULL;
1402 LLVMValueRef tmp6 = NULL;
1403 LLVMValueRef tmp7 = NULL;
1404 LLVMValueRef res;
1405 LLVMValueRef dst0[NUM_CHANNELS];
1406
1407 /*
1408 * Stores and write masks are handled in a general fashion after the long
1409 * instruction opcode switch statement.
1410 *
1411 * Although not strictly necessary, we avoid generating instructions for
1412 * channels which won't be stored, in cases where that's easy. For some
1413 * complex instructions, like texture sampling, it is more convenient to
1414 * assume a full writemask and then let LLVM optimization passes eliminate
1415 * redundant code.
1416 */
1417
1418 (*pc)++;
1419
1420 assert(info->num_dst <= 1);
1421 if (info->num_dst) {
1422 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1423 dst0[chan_index] = bld->base.undef;
1424 }
1425 }
1426
1427 switch (inst->Instruction.Opcode) {
1428 case TGSI_OPCODE_ARL:
1429 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1430 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1431 tmp0 = lp_build_floor(&bld->base, tmp0);
1432 dst0[chan_index] = tmp0;
1433 }
1434 break;
1435
1436 case TGSI_OPCODE_MOV:
1437 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1438 dst0[chan_index] = emit_fetch( bld, inst, 0, chan_index );
1439 }
1440 break;
1441
1442 case TGSI_OPCODE_LIT:
1443 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ) {
1444 dst0[CHAN_X] = bld->base.one;
1445 }
1446 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1447 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1448 dst0[CHAN_Y] = lp_build_max( &bld->base, src0, bld->base.zero);
1449 }
1450 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1451 /* XMM[1] = SrcReg[0].yyyy */
1452 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1453 /* XMM[1] = max(XMM[1], 0) */
1454 tmp1 = lp_build_max( &bld->base, tmp1, bld->base.zero);
1455 /* XMM[2] = SrcReg[0].wwww */
1456 tmp2 = emit_fetch( bld, inst, 0, CHAN_W );
1457 tmp1 = lp_build_pow( &bld->base, tmp1, tmp2);
1458 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1459 tmp2 = lp_build_cmp(&bld->base, PIPE_FUNC_GREATER, tmp0, bld->base.zero);
1460 dst0[CHAN_Z] = lp_build_select(&bld->base, tmp2, tmp1, bld->base.zero);
1461 }
1462 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) ) {
1463 dst0[CHAN_W] = bld->base.one;
1464 }
1465 break;
1466
1467 case TGSI_OPCODE_RCP:
1468 /* TGSI_OPCODE_RECIP */
1469 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1470 res = lp_build_rcp(&bld->base, src0);
1471 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1472 dst0[chan_index] = res;
1473 }
1474 break;
1475
1476 case TGSI_OPCODE_RSQ:
1477 /* TGSI_OPCODE_RECIPSQRT */
1478 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1479 src0 = lp_build_abs(&bld->base, src0);
1480 res = lp_build_rsqrt(&bld->base, src0);
1481 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1482 dst0[chan_index] = res;
1483 }
1484 break;
1485
1486 case TGSI_OPCODE_EXP:
1487 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1488 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1489 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1490 LLVMValueRef *p_exp2_int_part = NULL;
1491 LLVMValueRef *p_frac_part = NULL;
1492 LLVMValueRef *p_exp2 = NULL;
1493
1494 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1495
1496 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1497 p_exp2_int_part = &tmp0;
1498 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1499 p_frac_part = &tmp1;
1500 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1501 p_exp2 = &tmp2;
1502
1503 lp_build_exp2_approx(&bld->base, src0, p_exp2_int_part, p_frac_part, p_exp2);
1504
1505 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1506 dst0[CHAN_X] = tmp0;
1507 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1508 dst0[CHAN_Y] = tmp1;
1509 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1510 dst0[CHAN_Z] = tmp2;
1511 }
1512 /* dst.w = 1.0 */
1513 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1514 dst0[CHAN_W] = bld->base.one;
1515 }
1516 break;
1517
1518 case TGSI_OPCODE_LOG:
1519 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1520 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1521 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1522 LLVMValueRef *p_floor_log2 = NULL;
1523 LLVMValueRef *p_exp = NULL;
1524 LLVMValueRef *p_log2 = NULL;
1525
1526 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1527 src0 = lp_build_abs( &bld->base, src0 );
1528
1529 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1530 p_floor_log2 = &tmp0;
1531 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1532 p_exp = &tmp1;
1533 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1534 p_log2 = &tmp2;
1535
1536 lp_build_log2_approx(&bld->base, src0, p_exp, p_floor_log2, p_log2);
1537
1538 /* dst.x = floor(lg2(abs(src.x))) */
1539 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1540 dst0[CHAN_X] = tmp0;
1541 /* dst.y = abs(src.x)/ex2(floor(lg2(abs(src.x)))) */
1542 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y )) {
1543 dst0[CHAN_Y] = lp_build_div( &bld->base, src0, tmp1);
1544 }
1545 /* dst.z = lg2(abs(src.x)) */
1546 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1547 dst0[CHAN_Z] = tmp2;
1548 }
1549 /* dst.w = 1.0 */
1550 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1551 dst0[CHAN_W] = bld->base.one;
1552 }
1553 break;
1554
1555 case TGSI_OPCODE_MUL:
1556 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1557 src0 = emit_fetch( bld, inst, 0, chan_index );
1558 src1 = emit_fetch( bld, inst, 1, chan_index );
1559 dst0[chan_index] = lp_build_mul(&bld->base, src0, src1);
1560 }
1561 break;
1562
1563 case TGSI_OPCODE_ADD:
1564 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1565 src0 = emit_fetch( bld, inst, 0, chan_index );
1566 src1 = emit_fetch( bld, inst, 1, chan_index );
1567 dst0[chan_index] = lp_build_add(&bld->base, src0, src1);
1568 }
1569 break;
1570
1571 case TGSI_OPCODE_DP3:
1572 /* TGSI_OPCODE_DOT3 */
1573 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1574 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1575 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1576 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1577 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1578 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1579 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1580 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1581 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1582 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1583 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1584 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1585 dst0[chan_index] = tmp0;
1586 }
1587 break;
1588
1589 case TGSI_OPCODE_DP4:
1590 /* TGSI_OPCODE_DOT4 */
1591 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1592 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1593 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1594 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1595 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1596 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1597 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1598 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1599 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1600 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1601 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1602 tmp1 = emit_fetch( bld, inst, 0, CHAN_W );
1603 tmp2 = emit_fetch( bld, inst, 1, CHAN_W );
1604 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1605 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1606 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1607 dst0[chan_index] = tmp0;
1608 }
1609 break;
1610
1611 case TGSI_OPCODE_DST:
1612 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1613 dst0[CHAN_X] = bld->base.one;
1614 }
1615 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1616 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1617 tmp1 = emit_fetch( bld, inst, 1, CHAN_Y );
1618 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp0, tmp1);
1619 }
1620 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1621 dst0[CHAN_Z] = emit_fetch( bld, inst, 0, CHAN_Z );
1622 }
1623 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1624 dst0[CHAN_W] = emit_fetch( bld, inst, 1, CHAN_W );
1625 }
1626 break;
1627
1628 case TGSI_OPCODE_MIN:
1629 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1630 src0 = emit_fetch( bld, inst, 0, chan_index );
1631 src1 = emit_fetch( bld, inst, 1, chan_index );
1632 dst0[chan_index] = lp_build_min( &bld->base, src0, src1 );
1633 }
1634 break;
1635
1636 case TGSI_OPCODE_MAX:
1637 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1638 src0 = emit_fetch( bld, inst, 0, chan_index );
1639 src1 = emit_fetch( bld, inst, 1, chan_index );
1640 dst0[chan_index] = lp_build_max( &bld->base, src0, src1 );
1641 }
1642 break;
1643
1644 case TGSI_OPCODE_SLT:
1645 /* TGSI_OPCODE_SETLT */
1646 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1647 src0 = emit_fetch( bld, inst, 0, chan_index );
1648 src1 = emit_fetch( bld, inst, 1, chan_index );
1649 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, src1 );
1650 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1651 }
1652 break;
1653
1654 case TGSI_OPCODE_SGE:
1655 /* TGSI_OPCODE_SETGE */
1656 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1657 src0 = emit_fetch( bld, inst, 0, chan_index );
1658 src1 = emit_fetch( bld, inst, 1, chan_index );
1659 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GEQUAL, src0, src1 );
1660 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1661 }
1662 break;
1663
1664 case TGSI_OPCODE_MAD:
1665 /* TGSI_OPCODE_MADD */
1666 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1667 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1668 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1669 tmp2 = emit_fetch( bld, inst, 2, chan_index );
1670 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1671 tmp0 = lp_build_add( &bld->base, tmp0, tmp2);
1672 dst0[chan_index] = tmp0;
1673 }
1674 break;
1675
1676 case TGSI_OPCODE_SUB:
1677 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1678 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1679 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1680 dst0[chan_index] = lp_build_sub( &bld->base, tmp0, tmp1);
1681 }
1682 break;
1683
1684 case TGSI_OPCODE_LRP:
1685 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1686 src0 = emit_fetch( bld, inst, 0, chan_index );
1687 src1 = emit_fetch( bld, inst, 1, chan_index );
1688 src2 = emit_fetch( bld, inst, 2, chan_index );
1689 tmp0 = lp_build_sub( &bld->base, src1, src2 );
1690 tmp0 = lp_build_mul( &bld->base, src0, tmp0 );
1691 dst0[chan_index] = lp_build_add( &bld->base, tmp0, src2 );
1692 }
1693 break;
1694
1695 case TGSI_OPCODE_CND:
1696 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1697 src0 = emit_fetch( bld, inst, 0, chan_index );
1698 src1 = emit_fetch( bld, inst, 1, chan_index );
1699 src2 = emit_fetch( bld, inst, 2, chan_index );
1700 tmp1 = lp_build_const_vec(bld->base.gallivm, bld->base.type, 0.5);
1701 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src2, tmp1);
1702 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src0, src1 );
1703 }
1704 break;
1705
1706 case TGSI_OPCODE_DP2A:
1707 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1708 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1709 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1710 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1711 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1712 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1713 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1714 tmp1 = emit_fetch( bld, inst, 2, CHAN_X ); /* xmm1 = src[2].x */
1715 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1716 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1717 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1718 }
1719 break;
1720
1721 case TGSI_OPCODE_FRC:
1722 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1723 src0 = emit_fetch( bld, inst, 0, chan_index );
1724 tmp0 = lp_build_floor(&bld->base, src0);
1725 tmp0 = lp_build_sub(&bld->base, src0, tmp0);
1726 dst0[chan_index] = tmp0;
1727 }
1728 break;
1729
1730 case TGSI_OPCODE_CLAMP:
1731 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1732 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1733 src1 = emit_fetch( bld, inst, 1, chan_index );
1734 src2 = emit_fetch( bld, inst, 2, chan_index );
1735 tmp0 = lp_build_max(&bld->base, tmp0, src1);
1736 tmp0 = lp_build_min(&bld->base, tmp0, src2);
1737 dst0[chan_index] = tmp0;
1738 }
1739 break;
1740
1741 case TGSI_OPCODE_FLR:
1742 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1743 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1744 dst0[chan_index] = lp_build_floor(&bld->base, tmp0);
1745 }
1746 break;
1747
1748 case TGSI_OPCODE_ROUND:
1749 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1750 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1751 dst0[chan_index] = lp_build_round(&bld->base, tmp0);
1752 }
1753 break;
1754
1755 case TGSI_OPCODE_EX2: {
1756 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1757 tmp0 = lp_build_exp2( &bld->base, tmp0);
1758 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1759 dst0[chan_index] = tmp0;
1760 }
1761 break;
1762 }
1763
1764 case TGSI_OPCODE_LG2:
1765 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1766 tmp0 = lp_build_log2( &bld->base, tmp0);
1767 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1768 dst0[chan_index] = tmp0;
1769 }
1770 break;
1771
1772 case TGSI_OPCODE_POW:
1773 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1774 src1 = emit_fetch( bld, inst, 1, CHAN_X );
1775 res = lp_build_pow( &bld->base, src0, src1 );
1776 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1777 dst0[chan_index] = res;
1778 }
1779 break;
1780
1781 case TGSI_OPCODE_XPD:
1782 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1783 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1784 tmp1 = emit_fetch( bld, inst, 1, CHAN_Z );
1785 tmp3 = emit_fetch( bld, inst, 0, CHAN_Z );
1786 }
1787 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1788 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1789 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1790 tmp4 = emit_fetch( bld, inst, 1, CHAN_Y );
1791 }
1792 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1793 tmp2 = tmp0;
1794 tmp2 = lp_build_mul( &bld->base, tmp2, tmp1);
1795 tmp5 = tmp3;
1796 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1797 tmp2 = lp_build_sub( &bld->base, tmp2, tmp5);
1798 dst0[CHAN_X] = tmp2;
1799 }
1800 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1801 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1802 tmp2 = emit_fetch( bld, inst, 1, CHAN_X );
1803 tmp5 = emit_fetch( bld, inst, 0, CHAN_X );
1804 }
1805 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1806 tmp3 = lp_build_mul( &bld->base, tmp3, tmp2);
1807 tmp1 = lp_build_mul( &bld->base, tmp1, tmp5);
1808 tmp3 = lp_build_sub( &bld->base, tmp3, tmp1);
1809 dst0[CHAN_Y] = tmp3;
1810 }
1811 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1812 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1813 tmp0 = lp_build_mul( &bld->base, tmp0, tmp2);
1814 tmp5 = lp_build_sub( &bld->base, tmp5, tmp0);
1815 dst0[CHAN_Z] = tmp5;
1816 }
1817 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1818 dst0[CHAN_W] = bld->base.one;
1819 }
1820 break;
1821
1822 case TGSI_OPCODE_ABS:
1823 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1824 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1825 dst0[chan_index] = lp_build_abs( &bld->base, tmp0 );
1826 }
1827 break;
1828
1829 case TGSI_OPCODE_RCC:
1830 /* deprecated? */
1831 assert(0);
1832 return FALSE;
1833
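/* DPH: homogeneous dot product,
 *   dst = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w */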
1834 case TGSI_OPCODE_DPH:
1835 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1836 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1837 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1838 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1839 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1840 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1841 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1842 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1843 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1844 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1845 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1846 tmp1 = emit_fetch( bld, inst, 1, CHAN_W );
1847 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1848 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1849 dst0[chan_index] = tmp0;
1850 }
1851 break;
1852
1853 case TGSI_OPCODE_COS:
1854 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1855 tmp0 = lp_build_cos( &bld->base, tmp0 );
1856 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1857 dst0[chan_index] = tmp0;
1858 }
1859 break;
1860
1861 case TGSI_OPCODE_DDX:
1862 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1863 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, &dst0[chan_index], NULL);
1864 }
1865 break;
1866
1867 case TGSI_OPCODE_DDY:
1868 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1869 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, NULL, &dst0[chan_index]);
1870 }
1871 break;
1872
1873 case TGSI_OPCODE_KILP:
1874 /* predicated kill */
1875 emit_kilp( bld, inst, (*pc)-1 );
1876 break;
1877
1878 case TGSI_OPCODE_KIL:
1879 /* conditional kill */
1880 emit_kil( bld, inst, (*pc)-1 );
1881 break;
1882
1883 case TGSI_OPCODE_PK2H:
1884 return FALSE;
1885 break;
1886
1887 case TGSI_OPCODE_PK2US:
1888 return FALSE;
1889 break;
1890
1891 case TGSI_OPCODE_PK4B:
1892 return FALSE;
1893 break;
1894
1895 case TGSI_OPCODE_PK4UB:
1896 return FALSE;
1897 break;
1898
1899 case TGSI_OPCODE_RFL:
1900 return FALSE;
1901 break;
1902
1903 case TGSI_OPCODE_SEQ:
1904 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1905 src0 = emit_fetch( bld, inst, 0, chan_index );
1906 src1 = emit_fetch( bld, inst, 1, chan_index );
1907 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_EQUAL, src0, src1 );
1908 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1909 }
1910 break;
1911
1912 case TGSI_OPCODE_SFL:
1913 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1914 dst0[chan_index] = bld->base.zero;
1915 }
1916 break;
1917
1918 case TGSI_OPCODE_SGT:
1919 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1920 src0 = emit_fetch( bld, inst, 0, chan_index );
1921 src1 = emit_fetch( bld, inst, 1, chan_index );
1922 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src0, src1 );
1923 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1924 }
1925 break;
1926
1927 case TGSI_OPCODE_SIN:
1928 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1929 tmp0 = lp_build_sin( &bld->base, tmp0 );
1930 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1931 dst0[chan_index] = tmp0;
1932 }
1933 break;
1934
1935 case TGSI_OPCODE_SLE:
1936 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1937 src0 = emit_fetch( bld, inst, 0, chan_index );
1938 src1 = emit_fetch( bld, inst, 1, chan_index );
1939 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LEQUAL, src0, src1 );
1940 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1941 }
1942 break;
1943
1944 case TGSI_OPCODE_SNE:
1945 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1946 src0 = emit_fetch( bld, inst, 0, chan_index );
1947 src1 = emit_fetch( bld, inst, 1, chan_index );
1948 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_NOTEQUAL, src0, src1 );
1949 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1950 }
1951 break;
1952
1953 case TGSI_OPCODE_STR:
1954 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1955 dst0[chan_index] = bld->base.one;
1956 }
1957 break;
1958
1959 case TGSI_OPCODE_TEX:
1960 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_NONE, dst0 );
1961 break;
1962
1963 case TGSI_OPCODE_TXD:
1964 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV, dst0 );
1965 break;
1966
1967 case TGSI_OPCODE_UP2H:
1968 /* deprecated */
1969 assert (0);
1970 return FALSE;
1971 break;
1972
1973 case TGSI_OPCODE_UP2US:
1974 /* deprecated */
1975 assert(0);
1976 return FALSE;
1977 break;
1978
1979 case TGSI_OPCODE_UP4B:
1980 /* deprecated */
1981 assert(0);
1982 return FALSE;
1983 break;
1984
1985 case TGSI_OPCODE_UP4UB:
1986 /* deprecated */
1987 assert(0);
1988 return FALSE;
1989 break;
1990
1991 case TGSI_OPCODE_X2D:
1992 /* deprecated? */
1993 assert(0);
1994 return FALSE;
1995 break;
1996
1997 case TGSI_OPCODE_ARA:
1998 /* deprecated */
1999 assert(0);
2000 return FALSE;
2001 break;
2002
2003 case TGSI_OPCODE_ARR:
2004 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2005 tmp0 = emit_fetch( bld, inst, 0, chan_index );
2006 tmp0 = lp_build_round(&bld->base, tmp0);
2007 dst0[chan_index] = tmp0;
2008 }
2009 break;
2010
2011 case TGSI_OPCODE_BRA:
2012 /* deprecated */
2013 assert(0);
2014 return FALSE;
2015 break;
2016
2017 case TGSI_OPCODE_CAL:
2018 lp_exec_mask_call(&bld->exec_mask,
2019 inst->Label.Label,
2020 pc);
2021
2022 break;
2023
2024 case TGSI_OPCODE_RET:
2025 lp_exec_mask_ret(&bld->exec_mask, pc);
2026 break;
2027
2028 case TGSI_OPCODE_END:
2029 if (0) {
2030 /* for debugging */
2031 emit_dump_temps(bld);
2032 }
2033 *pc = -1;
2034 break;
2035
2036 case TGSI_OPCODE_SSG:
2037 /* TGSI_OPCODE_SGN */
2038 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2039 tmp0 = emit_fetch( bld, inst, 0, chan_index );
2040 dst0[chan_index] = lp_build_sgn( &bld->base, tmp0 );
2041 }
2042 break;
2043
2044 case TGSI_OPCODE_CMP:
2045 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2046 src0 = emit_fetch( bld, inst, 0, chan_index );
2047 src1 = emit_fetch( bld, inst, 1, chan_index );
2048 src2 = emit_fetch( bld, inst, 2, chan_index );
2049 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, bld->base.zero );
2050 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src1, src2);
2051 }
2052 break;
2053
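/* SCS: dst = (cos(src.x), sin(src.x), 0.0, 1.0) */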
2054 case TGSI_OPCODE_SCS:
2055 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
2056 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
2057 dst0[CHAN_X] = lp_build_cos( &bld->base, tmp0 );
2058 }
2059 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
2060 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
2061 dst0[CHAN_Y] = lp_build_sin( &bld->base, tmp0 );
2062 }
2063 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
2064 dst0[CHAN_Z] = bld->base.zero;
2065 }
2066 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
2067 dst0[CHAN_W] = bld->base.one;
2068 }
2069 break;
2070
2071 case TGSI_OPCODE_TXB:
2072 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_LOD_BIAS, dst0 );
2073 break;
2074
2075 case TGSI_OPCODE_NRM:
2076 /* fall-through */
2077 case TGSI_OPCODE_NRM4:
2078 /* 3 or 4-component normalization */
2079 {
2080 uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
2081
2082 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) ||
2083 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y) ||
2084 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z) ||
2085 (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4)) {
2086
2087 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
2088
2089 /* xmm4 = src.x */
2090 /* xmm0 = src.x * src.x */
2091 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
2092 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
2093 tmp4 = tmp0;
2094 }
2095 tmp0 = lp_build_mul( &bld->base, tmp0, tmp0);
2096
2097 /* xmm5 = src.y */
2098 /* xmm0 = xmm0 + src.y * src.y */
2099 tmp1 = emit_fetch(bld, inst, 0, CHAN_Y);
2100 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
2101 tmp5 = tmp1;
2102 }
2103 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
2104 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
2105
2106 /* xmm6 = src.z */
2107 /* xmm0 = xmm0 + src.z * src.z */
2108 tmp1 = emit_fetch(bld, inst, 0, CHAN_Z);
2109 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
2110 tmp6 = tmp1;
2111 }
2112 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
2113 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
2114
2115 if (dims == 4) {
2116 /* xmm7 = src.w */
2117 /* xmm0 = xmm0 + src.w * src.w */
2118 tmp1 = emit_fetch(bld, inst, 0, CHAN_W);
2119 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W)) {
2120 tmp7 = tmp1;
2121 }
2122 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
2123 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
2124 }
2125
2126 /* xmm1 = 1 / sqrt(xmm0) */
2127 tmp1 = lp_build_rsqrt( &bld->base, tmp0);
2128
2129 /* dst.x = xmm1 * src.x */
2130 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
2131 dst0[CHAN_X] = lp_build_mul( &bld->base, tmp4, tmp1);
2132 }
2133
2134 /* dst.y = xmm1 * src.y */
2135 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
2136 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp5, tmp1);
2137 }
2138
2139 /* dst.z = xmm1 * src.z */
2140 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
2141 dst0[CHAN_Z] = lp_build_mul( &bld->base, tmp6, tmp1);
2142 }
2143
2144 /* dst.w = xmm1 * src.w */
2145 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4) {
2146 dst0[CHAN_W] = lp_build_mul( &bld->base, tmp7, tmp1);
2147 }
2148 }
2149
2150 /* dst.w = 1.0 */
2151 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 3) {
2152 dst0[CHAN_W] = bld->base.one;
2153 }
2154 }
2155 break;
2156
2157 case TGSI_OPCODE_DIV:
2158 /* deprecated */
2159 assert( 0 );
2160 return FALSE;
2161 break;
2162
2163 case TGSI_OPCODE_DP2:
2164 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
2165 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
2166 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
2167 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
2168 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
2169 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
2170 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
2171 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2172 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
2173 }
2174 break;
2175
2176 case TGSI_OPCODE_TXL:
2177 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD, dst0 );
2178 break;
2179
2180 case TGSI_OPCODE_TXP:
2181 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_PROJECTED, dst0 );
2182 break;
2183
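/* Structured control flow (BRK/CONT, IF/ELSE/ENDIF, loops, subroutines)
 * is handled through the SoA execution mask: the helpers below push,
 * invert and pop per-channel masks rather than emitting per-pixel
 * branches. */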
2184 case TGSI_OPCODE_BRK:
2185 lp_exec_break(&bld->exec_mask);
2186 break;
2187
2188 case TGSI_OPCODE_IF:
2189 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
2190 tmp0 = lp_build_cmp(&bld->base, PIPE_FUNC_NOTEQUAL,
2191 tmp0, bld->base.zero);
2192 lp_exec_mask_cond_push(&bld->exec_mask, tmp0);
2193 break;
2194
2195 case TGSI_OPCODE_BGNLOOP:
2196 lp_exec_bgnloop(&bld->exec_mask);
2197 break;
2198
2199 case TGSI_OPCODE_BGNSUB:
2200 lp_exec_mask_bgnsub(&bld->exec_mask);
2201 break;
2202
2203 case TGSI_OPCODE_ELSE:
2204 lp_exec_mask_cond_invert(&bld->exec_mask);
2205 break;
2206
2207 case TGSI_OPCODE_ENDIF:
2208 lp_exec_mask_cond_pop(&bld->exec_mask);
2209 break;
2210
2211 case TGSI_OPCODE_ENDLOOP:
2212 lp_exec_endloop(bld->base.gallivm, &bld->exec_mask);
2213 break;
2214
2215 case TGSI_OPCODE_ENDSUB:
2216 lp_exec_mask_endsub(&bld->exec_mask, pc);
2217 break;
2218
2219 case TGSI_OPCODE_PUSHA:
2220 /* deprecated? */
2221 assert(0);
2222 return FALSE;
2223 break;
2224
2225 case TGSI_OPCODE_POPA:
2226 /* deprecated? */
2227 assert(0);
2228 return FALSE;
2229 break;
2230
2231 case TGSI_OPCODE_CEIL:
2232 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2233 tmp0 = emit_fetch( bld, inst, 0, chan_index );
2234 dst0[chan_index] = lp_build_ceil(&bld->base, tmp0);
2235 }
2236 break;
2237
2238 case TGSI_OPCODE_I2F:
2239 /* deprecated? */
2240 assert(0);
2241 return FALSE;
2242 break;
2243
2244 case TGSI_OPCODE_NOT:
2245 /* deprecated? */
2246 assert(0);
2247 return FALSE;
2248 break;
2249
2250 case TGSI_OPCODE_TRUNC:
2251 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2252 tmp0 = emit_fetch( bld, inst, 0, chan_index );
2253 dst0[chan_index] = lp_build_trunc(&bld->base, tmp0);
2254 }
2255 break;
2256
2257 case TGSI_OPCODE_SHL:
2258 /* deprecated? */
2259 assert(0);
2260 return FALSE;
2261 break;
2262
2263 case TGSI_OPCODE_ISHR:
2264 /* deprecated? */
2265 assert(0);
2266 return FALSE;
2267 break;
2268
2269 case TGSI_OPCODE_AND:
2270 /* deprecated? */
2271 assert(0);
2272 return FALSE;
2273 break;
2274
2275 case TGSI_OPCODE_OR:
2276 /* deprecated? */
2277 assert(0);
2278 return FALSE;
2279 break;
2280
2281 case TGSI_OPCODE_MOD:
2282 /* deprecated? */
2283 assert(0);
2284 return FALSE;
2285 break;
2286
2287 case TGSI_OPCODE_XOR:
2288 /* deprecated? */
2289 assert(0);
2290 return FALSE;
2291 break;
2292
2293 case TGSI_OPCODE_SAD:
2294 /* deprecated? */
2295 assert(0);
2296 return FALSE;
2297 break;
2298
2299 case TGSI_OPCODE_TXF:
2300 /* deprecated? */
2301 assert(0);
2302 return FALSE;
2303 break;
2304
2305 case TGSI_OPCODE_TXQ:
2306 /* deprecated? */
2307 assert(0);
2308 return FALSE;
2309 break;
2310
2311 case TGSI_OPCODE_CONT:
2312 lp_exec_continue(&bld->exec_mask);
2313 break;
2314
2315 case TGSI_OPCODE_EMIT:
2316 return FALSE;
2317 break;
2318
2319 case TGSI_OPCODE_ENDPRIM:
2320 return FALSE;
2321 break;
2322
2323 case TGSI_OPCODE_NOP:
2324 break;
2325
2326 default:
2327 return FALSE;
2328 }
2329
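/* Write back the per-channel results computed above, honoring the
 * destination write mask and any per-channel predicate. */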
2330 if(info->num_dst) {
2331 LLVMValueRef pred[NUM_CHANNELS];
2332
2333 emit_fetch_predicate( bld, inst, pred );
2334
2335 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2336 emit_store( bld, inst, 0, chan_index, pred[chan_index], dst0[chan_index]);
2337 }
2338 }
2339
2340 return TRUE;
2341 }
2342
2343
2344 void
2345 lp_build_tgsi_soa(struct gallivm_state *gallivm,
2346 const struct tgsi_token *tokens,
2347 struct lp_type type,
2348 struct lp_build_mask_context *mask,
2349 LLVMValueRef consts_ptr,
2350 LLVMValueRef system_values_array,
2351 const LLVMValueRef *pos,
2352 const LLVMValueRef (*inputs)[NUM_CHANNELS],
2353 LLVMValueRef (*outputs)[NUM_CHANNELS],
2354 struct lp_build_sampler_soa *sampler,
2355 const struct tgsi_shader_info *info)
2356 {
2357 struct lp_build_tgsi_soa_context bld;
2358 struct tgsi_parse_context parse;
2359 uint num_immediates = 0;
2360 uint num_instructions = 0;
2361 unsigned i;
2362 int pc = 0;
2363
2364 struct lp_type res_type;
2365
2366 assert(type.length <= LP_MAX_VECTOR_LENGTH);
2367 memset(&res_type, 0, sizeof res_type);
2368 res_type.width = type.width;
2369 res_type.length = type.length;
2370 res_type.sign = 1;
2371
2372 /* Setup build context */
2373 memset(&bld, 0, sizeof bld);
2374 lp_build_context_init(&bld.base, gallivm, type);
2375 lp_build_context_init(&bld.uint_bld, gallivm, lp_uint_type(type));
2376 lp_build_context_init(&bld.elem_bld, gallivm, lp_elem_type(type));
2377 bld.mask = mask;
2378 bld.pos = pos;
2379 bld.inputs = inputs;
2380 bld.outputs = outputs;
2381 bld.consts_ptr = consts_ptr;
2382 bld.sampler = sampler;
2383 bld.info = info;
2384 bld.indirect_files = info->indirect_files;
2385 bld.instructions = (struct tgsi_full_instruction *)
2386 MALLOC( LP_MAX_INSTRUCTIONS * sizeof(struct tgsi_full_instruction) );
2387 bld.max_instructions = LP_MAX_INSTRUCTIONS;
2388
2389 if (!bld.instructions) {
2390 return;
2391 }
2392
2393 lp_exec_mask_init(&bld.exec_mask, &bld.base);
2394
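/* Indirectly addressed register files are backed by a flat array of
 * vectors, 4 channels per register: (file_max + 1) * 4 elements. */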
2395 if (bld.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
2396 LLVMValueRef array_size =
2397 lp_build_const_int32(gallivm,
2398 info->file_max[TGSI_FILE_TEMPORARY] * 4 + 4);
2399 bld.temps_array = lp_build_array_alloca(gallivm,
2400 bld.base.vec_type, array_size,
2401 "temp_array");
2402 }
2403
2404 if (bld.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2405 LLVMValueRef array_size =
2406 lp_build_const_int32(gallivm,
2407 info->file_max[TGSI_FILE_OUTPUT] * 4 + 4);
2408 bld.outputs_array = lp_build_array_alloca(gallivm,
2409 bld.base.vec_type, array_size,
2410 "output_array");
2411 }
2412
2413 /* If we have indirect addressing in inputs we need to copy them into
2414 * our alloca array so they can be indexed dynamically */
2415 if (bld.indirect_files & (1 << TGSI_FILE_INPUT)) {
2416 unsigned index, chan;
2417 LLVMTypeRef vec_type = bld.base.vec_type;
2418 LLVMValueRef array_size =
2419 lp_build_const_int32(gallivm, info->file_max[TGSI_FILE_INPUT]*4 + 4);
2420 bld.inputs_array = lp_build_array_alloca(gallivm,
2421 vec_type, array_size,
2422 "input_array");
2423
2424 assert(info->num_inputs <= info->file_max[TGSI_FILE_INPUT] + 1);
2425
2426 for (index = 0; index < info->num_inputs; ++index) {
2427 for (chan = 0; chan < NUM_CHANNELS; ++chan) {
2428 LLVMValueRef lindex =
2429 lp_build_const_int32(gallivm, index * 4 + chan);
2430 LLVMValueRef input_ptr =
2431 LLVMBuildGEP(gallivm->builder, bld.inputs_array,
2432 &lindex, 1, "");
2433 LLVMValueRef value = bld.inputs[index][chan];
2434 if (value)
2435 LLVMBuildStore(gallivm->builder, value, input_ptr);
2436 }
2437 }
2438 }
2439
2440 bld.system_values_array = system_values_array;
2441
2442 tgsi_parse_init( &parse, tokens );
2443
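/* First pass: walk the token stream, emitting declarations immediately
 * and buffering instructions and immediates for the emission loop below. */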
2444 while( !tgsi_parse_end_of_tokens( &parse ) ) {
2445 tgsi_parse_token( &parse );
2446
2447 switch( parse.FullToken.Token.Type ) {
2448 case TGSI_TOKEN_TYPE_DECLARATION:
2449 /* Inputs already interpolated */
2450 emit_declaration( &bld, &parse.FullToken.FullDeclaration );
2451 break;
2452
2453 case TGSI_TOKEN_TYPE_INSTRUCTION:
2454 {
2455 /* save expanded instruction */
2456 if (num_instructions == bld.max_instructions) {
2457 struct tgsi_full_instruction *instructions;
2458 instructions = REALLOC(bld.instructions,
2459 bld.max_instructions
2460 * sizeof(struct tgsi_full_instruction),
2461 (bld.max_instructions + LP_MAX_INSTRUCTIONS)
2462 * sizeof(struct tgsi_full_instruction));
2463 if (!instructions) {
2464 break;
2465 }
2466 bld.instructions = instructions;
2467 bld.max_instructions += LP_MAX_INSTRUCTIONS;
2468 }
2469
2470 memcpy(bld.instructions + num_instructions,
2471 &parse.FullToken.FullInstruction,
2472 sizeof(bld.instructions[0]));
2473
2474 num_instructions++;
2475 }
2476
2477 break;
2478
2479 case TGSI_TOKEN_TYPE_IMMEDIATE:
2480 /* simply copy the immediate values into the next immediates[] slot */
2481 {
2482 const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
2483 assert(size <= 4);
2484 assert(num_immediates < LP_MAX_TGSI_IMMEDIATES);
2485 for( i = 0; i < size; ++i )
2486 bld.immediates[num_immediates][i] =
2487 lp_build_const_vec(gallivm, type, parse.FullToken.FullImmediate.u[i].Float);
2488 for( i = size; i < 4; ++i )
2489 bld.immediates[num_immediates][i] = bld.base.undef;
2490 num_immediates++;
2491 }
2492 break;
2493
2494 case TGSI_TOKEN_TYPE_PROPERTY:
2495 break;
2496
2497 default:
2498 assert( 0 );
2499 }
2500 }
2501
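/* Second pass: emit LLVM IR for the buffered instructions.  The program
 * counter advances as instructions are emitted; CAL/RET adjust it for
 * subroutines and END sets it to -1 to terminate the loop. */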
2502 while (pc != -1) {
2503 struct tgsi_full_instruction *instr = bld.instructions + pc;
2504 const struct tgsi_opcode_info *opcode_info =
2505 tgsi_get_opcode_info(instr->Instruction.Opcode);
2506 if (!emit_instruction( &bld, instr, opcode_info, &pc ))
2507 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2508 opcode_info->mnemonic);
2509 }
2510
2511 /* If we have indirect addressing in outputs we need to copy our alloca array
2512 * to the output slots specified by the caller */
2513 if (bld.indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2514 unsigned index, chan;
2515 assert(info->num_outputs <= info->file_max[TGSI_FILE_OUTPUT] + 1);
2516 for (index = 0; index < info->num_outputs; ++index) {
2517 for (chan = 0; chan < NUM_CHANNELS; ++chan) {
2518 bld.outputs[index][chan] = get_output_ptr(&bld, index, chan);
2519 }
2520 }
2521 }
2522
2523 if (0) {
2524 LLVMBasicBlockRef block = LLVMGetInsertBlock(gallivm->builder);
2525 LLVMValueRef function = LLVMGetBasicBlockParent(block);
2526 debug_printf("11111111111111111111111111111 \n");
2527 tgsi_dump(tokens, 0);
2528 lp_debug_dump_value(function);
2529 debug_printf("2222222222222222222222222222 \n");
2530 }
2531 tgsi_parse_free( &parse );
2532
2533 if (0) {
2534 LLVMModuleRef module = LLVMGetGlobalParent(
2535 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm->builder)));
2536 LLVMDumpModule(module);
2537
2538 }
2539
2540 FREE( bld.instructions );
2541 }
2542
2543
2544 /**
2545 * Build up the system values array out of individual values such as
2546 * the instance ID, front-face, primitive ID, etc. The shader info is
2547 * used to determine which system values are needed and where to put
2548 * them in the system values array.
2549 *
2550 * XXX only instance ID is implemented at this time.
2551 *
2552 * The system values register file is similar to the constants buffer.
2553 * Example declaration:
2554 * DCL SV[0], INSTANCEID
2555 * Example instruction:
2556 * MOV foo, SV[0].xxxx;
2557 *
2558 * \return LLVM float array (interpreted as float [][4])
2559 */
2560 LLVMValueRef
2561 lp_build_system_values_array(struct gallivm_state *gallivm,
2562 const struct tgsi_shader_info *info,
2563 LLVMValueRef instance_id,
2564 LLVMValueRef facing)
2565 {
2566 LLVMValueRef size = lp_build_const_int32(gallivm, 4 * info->num_system_values);
2567 LLVMTypeRef float_t = LLVMFloatTypeInContext(gallivm->context);
2568 LLVMValueRef array = lp_build_array_alloca(gallivm, float_t,
2569 size, "sysvals_array");
2570 unsigned i;
2571
2572 for (i = 0; i < info->num_system_values; i++) {
2573 LLVMValueRef index = lp_build_const_int32(gallivm, i * 4);
2574 LLVMValueRef ptr, value = 0;
2575
2576 switch (info->system_value_semantic_name[i]) {
2577 case TGSI_SEMANTIC_INSTANCEID:
2578 /* convert instance ID from int to float */
2579 value = LLVMBuildSIToFP(gallivm->builder, instance_id, float_t,
2580 "sysval_instanceid");
2581 break;
2582 case TGSI_SEMANTIC_FACE:
2583 /* fall-through */
2584 default:
2585 assert(0 && "unexpected semantic in lp_build_system_values_array()");
2586 }
2587
2588 ptr = LLVMBuildGEP(gallivm->builder, array, &index, 1, "");
2589 LLVMBuildStore(gallivm->builder, value, ptr);
2590 }
2591
2592 return array;
2593 }
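

/*
 * Usage sketch (illustrative only, not code from this file): a driver that
 * has the instance ID available as an LLVM value could build the array and
 * hand it to lp_build_tgsi_soa() roughly as follows, assuming `gallivm`,
 * `shader_info`, `instance_id` and the remaining lp_build_tgsi_soa()
 * arguments are already set up:
 *
 *    LLVMValueRef sysvals =
 *       lp_build_system_values_array(gallivm, shader_info,
 *                                    instance_id, NULL);
 *
 *    lp_build_tgsi_soa(gallivm, tokens, type, mask, consts_ptr,
 *                      sysvals, pos, inputs, outputs, sampler, shader_info);
 *
 * The `facing` argument is not used yet (front-face is not implemented
 * above), so NULL is acceptable in this sketch.
 */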