Merge branch 'llvm-cliptest-viewport'
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_tgsi_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_scan.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_bitarit.h"
53 #include "lp_bld_gather.h"
54 #include "lp_bld_logic.h"
55 #include "lp_bld_swizzle.h"
56 #include "lp_bld_flow.h"
57 #include "lp_bld_quad.h"
58 #include "lp_bld_tgsi.h"
59 #include "lp_bld_limits.h"
60 #include "lp_bld_debug.h"
61
62
/* Iterate CHAN over the four SoA channels (x, y, z, w). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* Non-zero if channel CHAN is set in the write mask of dst register 0. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate CHAN over only those channels enabled in dst register 0's
 * write mask.
 */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3
#define NUM_CHANNELS 4

/* Initial capacity of the instruction buffer; grown on demand. */
#define LP_MAX_INSTRUCTIONS 256
84
/**
 * Execution-mask state used to emulate control flow (if/else, loops,
 * subroutines) across the SoA vector lanes.  The effective mask is the
 * AND of the condition, continue, break and return masks.
 */
struct lp_exec_mask {
   struct lp_build_context *bld;

   /* TRUE when any of the stacks below is non-empty, i.e. stores must
    * be predicated by exec_mask. */
   boolean has_mask;

   LLVMTypeRef int_vec_type;

   /* Saved condition masks for nested if/else blocks. */
   LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
   int cond_stack_size;
   LLVMValueRef cond_mask;

   /* Current innermost loop state; outer loops are saved on loop_stack. */
   LLVMBasicBlockRef loop_block;
   LLVMValueRef cont_mask;
   LLVMValueRef break_mask;
   /* Alloca holding break_mask so it survives across loop iterations. */
   LLVMValueRef break_var;
   struct {
      LLVMBasicBlockRef loop_block;
      LLVMValueRef cont_mask;
      LLVMValueRef break_mask;
      LLVMValueRef break_var;
   } loop_stack[LP_MAX_TGSI_NESTING];
   int loop_stack_size;

   /* Lanes disabled by RET inside a subroutine; saved/restored on
    * CAL/ENDSUB via call_stack. */
   LLVMValueRef ret_mask;
   struct {
      int pc;                 /* return address (instruction index) */
      LLVMValueRef ret_mask;
   } call_stack[LP_MAX_TGSI_NESTING];
   int call_stack_size;

   /* Combined mask; valid after lp_exec_mask_update(). */
   LLVMValueRef exec_mask;
};
117
/**
 * Per-shader translation context for TGSI -> LLVM IR (SoA layout).
 */
struct lp_build_tgsi_soa_context
{
   struct lp_build_context base;

   /* Builder for integer masks and indices */
   struct lp_build_context uint_bld;

   LLVMValueRef consts_ptr;                        /* constant buffer base */
   const LLVMValueRef *pos;
   const LLVMValueRef (*inputs)[NUM_CHANNELS];     /* per-input, per-channel values */
   LLVMValueRef (*outputs)[NUM_CHANNELS];          /* per-output, per-channel allocas */

   const struct lp_build_sampler_soa *sampler;     /* may be NULL (see emit_tex) */

   LLVMValueRef immediates[LP_MAX_TGSI_IMMEDIATES][NUM_CHANNELS];
   LLVMValueRef temps[LP_MAX_TGSI_TEMPS][NUM_CHANNELS];
   LLVMValueRef addr[LP_MAX_TGSI_ADDRS][NUM_CHANNELS];
   LLVMValueRef preds[LP_MAX_TGSI_PREDS][NUM_CHANNELS];

   /* We allocate/use this array of temps if (1 << TGSI_FILE_TEMPORARY) is
    * set in the indirect_files field.
    * The temps[] array above is unused then.
    */
   LLVMValueRef temps_array;

   const struct tgsi_shader_info *info;
   /** bitmask indicating which register files are accessed indirectly */
   unsigned indirect_files;

   struct lp_build_mask_context *mask;             /* fragment kill mask */
   struct lp_exec_mask exec_mask;                  /* control-flow mask */

   /* Buffered instruction stream (needed for CAL lookahead/branching). */
   struct tgsi_full_instruction *instructions;
   uint max_instructions;
};
153
154 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
155 {
156 mask->bld = bld;
157 mask->has_mask = FALSE;
158 mask->cond_stack_size = 0;
159 mask->loop_stack_size = 0;
160 mask->call_stack_size = 0;
161
162 mask->int_vec_type = lp_build_int_vec_type(mask->bld->type);
163 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
164 LLVMConstAllOnes(mask->int_vec_type);
165 }
166
167 static void lp_exec_mask_update(struct lp_exec_mask *mask)
168 {
169 if (mask->loop_stack_size) {
170 /*for loops we need to update the entire mask at runtime */
171 LLVMValueRef tmp;
172 assert(mask->break_mask);
173 tmp = LLVMBuildAnd(mask->bld->builder,
174 mask->cont_mask,
175 mask->break_mask,
176 "maskcb");
177 mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
178 mask->cond_mask,
179 tmp,
180 "maskfull");
181 } else
182 mask->exec_mask = mask->cond_mask;
183
184 if (mask->call_stack_size) {
185 mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
186 mask->exec_mask,
187 mask->ret_mask,
188 "callmask");
189 }
190
191 mask->has_mask = (mask->cond_stack_size > 0 ||
192 mask->loop_stack_size > 0 ||
193 mask->call_stack_size > 0);
194 }
195
196 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
197 LLVMValueRef val)
198 {
199 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
200 if (mask->cond_stack_size == 0) {
201 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
202 }
203 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
204 assert(LLVMTypeOf(val) == mask->int_vec_type);
205 mask->cond_mask = LLVMBuildAnd(mask->bld->builder,
206 mask->cond_mask,
207 val,
208 "");
209 lp_exec_mask_update(mask);
210 }
211
212 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
213 {
214 LLVMValueRef prev_mask;
215 LLVMValueRef inv_mask;
216
217 assert(mask->cond_stack_size);
218 prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
219 if (mask->cond_stack_size == 1) {
220 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
221 }
222
223 inv_mask = LLVMBuildNot(mask->bld->builder, mask->cond_mask, "");
224
225 mask->cond_mask = LLVMBuildAnd(mask->bld->builder,
226 inv_mask,
227 prev_mask, "");
228 lp_exec_mask_update(mask);
229 }
230
231 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
232 {
233 assert(mask->cond_stack_size);
234 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
235 lp_exec_mask_update(mask);
236 }
237
/**
 * BGNLOOP: push the enclosing loop's state, create the loop header
 * basic block and branch into it.  The break mask lives in an alloca
 * (break_var) so that it survives across loop iterations.
 */
static void lp_exec_bgnloop(struct lp_exec_mask *mask)
{
   if (mask->loop_stack_size == 0) {
      /* outermost loop: all loop state must be in its initial state */
      assert(mask->loop_block == NULL);
      assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_var == NULL);
   }

   assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);

   /* save enclosing loop's state */
   mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
   mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
   mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
   mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
   ++mask->loop_stack_size;

   /* spill the break mask so it persists across back-edges */
   mask->break_var = lp_build_alloca(mask->bld->builder, mask->int_vec_type, "");
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   /* start a new basic block for the loop body and fall into it */
   mask->loop_block = lp_build_insert_new_block(mask->bld->builder, "bgnloop");
   LLVMBuildBr(mask->bld->builder, mask->loop_block);
   LLVMPositionBuilderAtEnd(mask->bld->builder, mask->loop_block);

   /* reload the break mask at the top of each iteration */
   mask->break_mask = LLVMBuildLoad(mask->bld->builder, mask->break_var, "");

   lp_exec_mask_update(mask);
}
266
267 static void lp_exec_break(struct lp_exec_mask *mask)
268 {
269 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
270 mask->exec_mask,
271 "break");
272
273 mask->break_mask = LLVMBuildAnd(mask->bld->builder,
274 mask->break_mask,
275 exec_mask, "break_full");
276
277 lp_exec_mask_update(mask);
278 }
279
280 static void lp_exec_continue(struct lp_exec_mask *mask)
281 {
282 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
283 mask->exec_mask,
284 "");
285
286 mask->cont_mask = LLVMBuildAnd(mask->bld->builder,
287 mask->cont_mask,
288 exec_mask, "");
289
290 lp_exec_mask_update(mask);
291 }
292
293
/**
 * ENDLOOP: restore the continue mask, save the break mask for the next
 * iteration, and emit the conditional back-edge: loop again while any
 * lane is still active.  Finally pop the enclosing loop's state.
 */
static void lp_exec_endloop(struct lp_exec_mask *mask)
{
   LLVMBasicBlockRef endloop;
   /* integer wide enough to hold the whole mask vector as one scalar */
   LLVMTypeRef reg_type = LLVMIntType(mask->bld->type.width*
                                      mask->bld->type.length);
   LLVMValueRef i1cond;

   assert(mask->break_mask);

   /*
    * Restore the cont_mask, but don't pop
    */
   assert(mask->loop_stack_size);
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
   lp_exec_mask_update(mask);

   /*
    * Unlike the continue mask, the break_mask must be preserved across loop
    * iterations
    */
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   /* i1cond = (exec_mask != 0) — true while any lane is still active */
   i1cond = LLVMBuildICmp(
      mask->bld->builder,
      LLVMIntNE,
      LLVMBuildBitCast(mask->bld->builder, mask->exec_mask, reg_type, ""),
      LLVMConstNull(reg_type), "");

   endloop = lp_build_insert_new_block(mask->bld->builder, "endloop");

   /* back-edge to the loop header, or exit */
   LLVMBuildCondBr(mask->bld->builder,
                   i1cond, mask->loop_block, endloop);

   LLVMPositionBuilderAtEnd(mask->bld->builder, endloop);

   /* pop the enclosing loop's state saved by lp_exec_bgnloop() */
   assert(mask->loop_stack_size);
   --mask->loop_stack_size;
   mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
   mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
   mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;

   lp_exec_mask_update(mask);
}
339
340 /* stores val into an address pointed to by dst.
341 * mask->exec_mask is used to figure out which bits of val
342 * should be stored into the address
343 * (0 means don't store this bit, 1 means do store).
344 */
345 static void lp_exec_mask_store(struct lp_exec_mask *mask,
346 LLVMValueRef pred,
347 LLVMValueRef val,
348 LLVMValueRef dst)
349 {
350 /* Mix the predicate and execution mask */
351 if (mask->has_mask) {
352 if (pred) {
353 pred = LLVMBuildAnd(mask->bld->builder, pred, mask->exec_mask, "");
354 } else {
355 pred = mask->exec_mask;
356 }
357 }
358
359 if (pred) {
360 LLVMValueRef real_val, dst_val;
361
362 dst_val = LLVMBuildLoad(mask->bld->builder, dst, "");
363 real_val = lp_build_select(mask->bld,
364 pred,
365 val, dst_val);
366
367 LLVMBuildStore(mask->bld->builder, real_val, dst);
368 } else
369 LLVMBuildStore(mask->bld->builder, val, dst);
370 }
371
372 static void lp_exec_mask_call(struct lp_exec_mask *mask,
373 int func,
374 int *pc)
375 {
376 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
377 mask->call_stack[mask->call_stack_size].pc = *pc;
378 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
379 mask->call_stack_size++;
380 *pc = func;
381 }
382
383 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
384 {
385 LLVMValueRef exec_mask;
386
387 if (mask->call_stack_size == 0) {
388 /* returning from main() */
389 *pc = -1;
390 return;
391 }
392 exec_mask = LLVMBuildNot(mask->bld->builder,
393 mask->exec_mask,
394 "ret");
395
396 mask->ret_mask = LLVMBuildAnd(mask->bld->builder,
397 mask->ret_mask,
398 exec_mask, "ret_full");
399
400 lp_exec_mask_update(mask);
401 }
402
/* BGNSUB: nothing to do — all call/return bookkeeping is handled by
 * lp_exec_mask_call() and lp_exec_mask_ret()/lp_exec_mask_endsub().
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
406
407 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
408 {
409 assert(mask->call_stack_size);
410 mask->call_stack_size--;
411 *pc = mask->call_stack[mask->call_stack_size].pc;
412 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
413 lp_exec_mask_update(mask);
414 }
415
416
417 /**
418 * Return pointer to a temporary register channel (src or dest).
419 * Note that indirect addressing cannot be handled here.
420 * \param index which temporary register
421 * \param chan which channel of the temp register.
422 */
423 static LLVMValueRef
424 get_temp_ptr(struct lp_build_tgsi_soa_context *bld,
425 unsigned index,
426 unsigned chan)
427 {
428 assert(chan < 4);
429 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
430 LLVMValueRef lindex = lp_build_const_int32(index * 4 + chan);
431 return LLVMBuildGEP(bld->base.builder, bld->temps_array, &lindex, 1, "");
432 }
433 else {
434 return bld->temps[index][chan];
435 }
436 }
437
438
439 /**
440 * Gather vector.
441 * XXX the lp_build_gather() function should be capable of doing this
442 * with a little work.
443 */
444 static LLVMValueRef
445 build_gather(struct lp_build_tgsi_soa_context *bld,
446 LLVMValueRef base_ptr,
447 LLVMValueRef indexes)
448 {
449 LLVMValueRef res = bld->base.undef;
450 unsigned i;
451
452 /*
453 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
454 */
455 for (i = 0; i < bld->base.type.length; i++) {
456 LLVMValueRef ii = LLVMConstInt(LLVMInt32Type(), i, 0);
457 LLVMValueRef index = LLVMBuildExtractElement(bld->base.builder,
458 indexes, ii, "");
459 LLVMValueRef scalar_ptr = LLVMBuildGEP(bld->base.builder, base_ptr,
460 &index, 1, "");
461 LLVMValueRef scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");
462
463 res = LLVMBuildInsertElement(bld->base.builder, res, scalar, ii, "");
464 }
465
466 return res;
467 }
468
469
470 /**
471 * Read the current value of the ADDR register, convert the floats to
472 * ints, multiply by four and return the vector of offsets.
473 * The offsets will be used to index into the constant buffer or
474 * temporary register file.
475 */
476 static LLVMValueRef
477 get_indirect_index(struct lp_build_tgsi_soa_context *bld,
478 unsigned reg_file, unsigned reg_index,
479 const struct tgsi_src_register *indirect_reg)
480 {
481 struct lp_build_context *uint_bld = &bld->uint_bld;
482 /* always use X component of address register */
483 unsigned swizzle = indirect_reg->SwizzleX;
484 LLVMValueRef base;
485 LLVMValueRef rel;
486 LLVMValueRef max_index;
487 LLVMValueRef index;
488
489 assert(bld->indirect_files & (1 << reg_file));
490
491 base = lp_build_const_int_vec(uint_bld->type, reg_index);
492
493 assert(swizzle < 4);
494 rel = LLVMBuildLoad(bld->base.builder,
495 bld->addr[indirect_reg->Index][swizzle],
496 "load addr reg");
497
498 /* for indexing we want integers */
499 rel = LLVMBuildFPToSI(bld->base.builder,
500 rel,
501 uint_bld->vec_type, "");
502
503 index = lp_build_add(uint_bld, base, rel);
504
505 max_index = lp_build_const_int_vec(uint_bld->type,
506 bld->info->file_max[reg_file]);
507
508 assert(!uint_bld->type.sign);
509 index = lp_build_min(uint_bld, index, max_index);
510
511 return index;
512 }
513
514
515 /**
516 * Register fetch.
517 */
518 static LLVMValueRef
519 emit_fetch(
520 struct lp_build_tgsi_soa_context *bld,
521 const struct tgsi_full_instruction *inst,
522 unsigned src_op,
523 const unsigned chan_index )
524 {
525 struct lp_build_context *uint_bld = &bld->uint_bld;
526 const struct tgsi_full_src_register *reg = &inst->Src[src_op];
527 const unsigned swizzle =
528 tgsi_util_get_full_src_register_swizzle(reg, chan_index);
529 LLVMValueRef res;
530 LLVMValueRef indirect_index = NULL;
531
532 if (swizzle > 3) {
533 assert(0 && "invalid swizzle in emit_fetch()");
534 return bld->base.undef;
535 }
536
537 if (reg->Register.Indirect) {
538 indirect_index = get_indirect_index(bld,
539 reg->Register.File,
540 reg->Register.Index,
541 &reg->Indirect);
542 } else {
543 assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
544 }
545
546 switch (reg->Register.File) {
547 case TGSI_FILE_CONSTANT:
548 if (reg->Register.Indirect) {
549 LLVMValueRef swizzle_vec =
550 lp_build_const_int_vec(uint_bld->type, swizzle);
551 LLVMValueRef index_vec; /* index into the const buffer */
552
553 /* index_vec = indirect_index * 4 + swizzle */
554 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
555 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
556
557 /* Gather values from the constant buffer */
558 res = build_gather(bld, bld->consts_ptr, index_vec);
559 }
560 else {
561 LLVMValueRef index; /* index into the const buffer */
562 LLVMValueRef scalar, scalar_ptr;
563
564 index = lp_build_const_int32(reg->Register.Index*4 + swizzle);
565
566 scalar_ptr = LLVMBuildGEP(bld->base.builder, bld->consts_ptr,
567 &index, 1, "");
568 scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");
569
570 res = lp_build_broadcast_scalar(&bld->base, scalar);
571 }
572 break;
573
574 case TGSI_FILE_IMMEDIATE:
575 res = bld->immediates[reg->Register.Index][swizzle];
576 assert(res);
577 break;
578
579 case TGSI_FILE_INPUT:
580 res = bld->inputs[reg->Register.Index][swizzle];
581 assert(res);
582 break;
583
584 case TGSI_FILE_TEMPORARY:
585 if (reg->Register.Indirect) {
586 LLVMValueRef swizzle_vec =
587 lp_build_const_int_vec(uint_bld->type, swizzle);
588 LLVMValueRef length_vec =
589 lp_build_const_int_vec(uint_bld->type, bld->base.type.length);
590 LLVMValueRef index_vec; /* index into the const buffer */
591 LLVMValueRef temps_array;
592 LLVMTypeRef float4_ptr_type;
593
594 /* index_vec = (indirect_index * 4 + swizzle) * length */
595 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
596 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
597 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
598
599 /* cast temps_array pointer to float* */
600 float4_ptr_type = LLVMPointerType(LLVMFloatType(), 0);
601 temps_array = LLVMBuildBitCast(uint_bld->builder, bld->temps_array,
602 float4_ptr_type, "");
603
604 /* Gather values from the temporary register array */
605 res = build_gather(bld, temps_array, index_vec);
606 }
607 else {
608 LLVMValueRef temp_ptr;
609 temp_ptr = get_temp_ptr(bld, reg->Register.Index, swizzle);
610 res = LLVMBuildLoad(bld->base.builder, temp_ptr, "");
611 if (!res)
612 return bld->base.undef;
613 }
614 break;
615
616 default:
617 assert(0 && "invalid src register in emit_fetch()");
618 return bld->base.undef;
619 }
620
621 switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
622 case TGSI_UTIL_SIGN_CLEAR:
623 res = lp_build_abs( &bld->base, res );
624 break;
625
626 case TGSI_UTIL_SIGN_SET:
627 res = lp_build_abs( &bld->base, res );
628 /* fall through */
629 case TGSI_UTIL_SIGN_TOGGLE:
630 res = lp_build_negate( &bld->base, res );
631 break;
632
633 case TGSI_UTIL_SIGN_KEEP:
634 break;
635 }
636
637 return res;
638 }
639
640
641 /**
642 * Register fetch with derivatives.
643 */
644 static void
645 emit_fetch_deriv(
646 struct lp_build_tgsi_soa_context *bld,
647 const struct tgsi_full_instruction *inst,
648 unsigned index,
649 const unsigned chan_index,
650 LLVMValueRef *res,
651 LLVMValueRef *ddx,
652 LLVMValueRef *ddy)
653 {
654 LLVMValueRef src;
655
656 src = emit_fetch(bld, inst, index, chan_index);
657
658 if(res)
659 *res = src;
660
661 /* TODO: use interpolation coeffs for inputs */
662
663 if(ddx)
664 *ddx = lp_build_ddx(&bld->base, src);
665
666 if(ddy)
667 *ddy = lp_build_ddy(&bld->base, src);
668 }
669
670
671 /**
672 * Predicate.
673 */
674 static void
675 emit_fetch_predicate(
676 struct lp_build_tgsi_soa_context *bld,
677 const struct tgsi_full_instruction *inst,
678 LLVMValueRef *pred)
679 {
680 unsigned index;
681 unsigned char swizzles[4];
682 LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
683 LLVMValueRef value;
684 unsigned chan;
685
686 if (!inst->Instruction.Predicate) {
687 FOR_EACH_CHANNEL( chan ) {
688 pred[chan] = NULL;
689 }
690 return;
691 }
692
693 swizzles[0] = inst->Predicate.SwizzleX;
694 swizzles[1] = inst->Predicate.SwizzleY;
695 swizzles[2] = inst->Predicate.SwizzleZ;
696 swizzles[3] = inst->Predicate.SwizzleW;
697
698 index = inst->Predicate.Index;
699 assert(index < LP_MAX_TGSI_PREDS);
700
701 FOR_EACH_CHANNEL( chan ) {
702 unsigned swizzle = swizzles[chan];
703
704 /*
705 * Only fetch the predicate register channels that are actually listed
706 * in the swizzles
707 */
708 if (!unswizzled[swizzle]) {
709 value = LLVMBuildLoad(bld->base.builder,
710 bld->preds[index][swizzle], "");
711
712 /*
713 * Convert the value to an integer mask.
714 *
715 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
716 * is needlessly causing two comparisons due to storing the intermediate
717 * result as float vector instead of an integer mask vector.
718 */
719 value = lp_build_compare(bld->base.builder,
720 bld->base.type,
721 PIPE_FUNC_NOTEQUAL,
722 value,
723 bld->base.zero);
724 if (inst->Predicate.Negate) {
725 value = LLVMBuildNot(bld->base.builder, value, "");
726 }
727
728 unswizzled[swizzle] = value;
729 } else {
730 value = unswizzled[swizzle];
731 }
732
733 pred[chan] = value;
734 }
735 }
736
737
738 /**
739 * Register store.
740 */
741 static void
742 emit_store(
743 struct lp_build_tgsi_soa_context *bld,
744 const struct tgsi_full_instruction *inst,
745 unsigned index,
746 unsigned chan_index,
747 LLVMValueRef pred,
748 LLVMValueRef value)
749 {
750 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
751 LLVMValueRef indirect_index = NULL;
752
753 switch( inst->Instruction.Saturate ) {
754 case TGSI_SAT_NONE:
755 break;
756
757 case TGSI_SAT_ZERO_ONE:
758 value = lp_build_max(&bld->base, value, bld->base.zero);
759 value = lp_build_min(&bld->base, value, bld->base.one);
760 break;
761
762 case TGSI_SAT_MINUS_PLUS_ONE:
763 value = lp_build_max(&bld->base, value, lp_build_const_vec(bld->base.type, -1.0));
764 value = lp_build_min(&bld->base, value, bld->base.one);
765 break;
766
767 default:
768 assert(0);
769 }
770
771 if (reg->Register.Indirect) {
772 indirect_index = get_indirect_index(bld,
773 reg->Register.File,
774 reg->Register.Index,
775 &reg->Indirect);
776 } else {
777 assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
778 }
779
780 switch( reg->Register.File ) {
781 case TGSI_FILE_OUTPUT:
782 lp_exec_mask_store(&bld->exec_mask, pred, value,
783 bld->outputs[reg->Register.Index][chan_index]);
784 break;
785
786 case TGSI_FILE_TEMPORARY:
787 if (reg->Register.Indirect) {
788 /* XXX not done yet */
789 debug_printf("WARNING: LLVM scatter store of temp regs"
790 " not implemented\n");
791 }
792 else {
793 LLVMValueRef temp_ptr = get_temp_ptr(bld, reg->Register.Index,
794 chan_index);
795 lp_exec_mask_store(&bld->exec_mask, pred, value, temp_ptr);
796 }
797 break;
798
799 case TGSI_FILE_ADDRESS:
800 lp_exec_mask_store(&bld->exec_mask, pred, value,
801 bld->addr[reg->Indirect.Index][chan_index]);
802 break;
803
804 case TGSI_FILE_PREDICATE:
805 lp_exec_mask_store(&bld->exec_mask, pred, value,
806 bld->preds[reg->Register.Index][chan_index]);
807 break;
808
809 default:
810 assert( 0 );
811 }
812 }
813
814
815 /**
816 * High-level instruction translators.
817 */
818
819 static void
820 emit_tex( struct lp_build_tgsi_soa_context *bld,
821 const struct tgsi_full_instruction *inst,
822 enum lp_build_tex_modifier modifier,
823 LLVMValueRef *texel)
824 {
825 unsigned unit;
826 LLVMValueRef lod_bias, explicit_lod;
827 LLVMValueRef oow = NULL;
828 LLVMValueRef coords[3];
829 LLVMValueRef ddx[3];
830 LLVMValueRef ddy[3];
831 unsigned num_coords;
832 unsigned i;
833
834 if (!bld->sampler) {
835 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
836 for (i = 0; i < 4; i++) {
837 texel[i] = bld->base.undef;
838 }
839 return;
840 }
841
842 switch (inst->Texture.Texture) {
843 case TGSI_TEXTURE_1D:
844 num_coords = 1;
845 break;
846 case TGSI_TEXTURE_2D:
847 case TGSI_TEXTURE_RECT:
848 num_coords = 2;
849 break;
850 case TGSI_TEXTURE_SHADOW1D:
851 case TGSI_TEXTURE_SHADOW2D:
852 case TGSI_TEXTURE_SHADOWRECT:
853 case TGSI_TEXTURE_3D:
854 case TGSI_TEXTURE_CUBE:
855 num_coords = 3;
856 break;
857 default:
858 assert(0);
859 return;
860 }
861
862 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
863 lod_bias = emit_fetch( bld, inst, 0, 3 );
864 explicit_lod = NULL;
865 }
866 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
867 lod_bias = NULL;
868 explicit_lod = emit_fetch( bld, inst, 0, 3 );
869 }
870 else {
871 lod_bias = NULL;
872 explicit_lod = NULL;
873 }
874
875 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
876 oow = emit_fetch( bld, inst, 0, 3 );
877 oow = lp_build_rcp(&bld->base, oow);
878 }
879
880 for (i = 0; i < num_coords; i++) {
881 coords[i] = emit_fetch( bld, inst, 0, i );
882 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
883 coords[i] = lp_build_mul(&bld->base, coords[i], oow);
884 }
885 for (i = num_coords; i < 3; i++) {
886 coords[i] = bld->base.undef;
887 }
888
889 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
890 LLVMTypeRef i32t = LLVMInt32Type();
891 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
892 for (i = 0; i < num_coords; i++) {
893 LLVMValueRef src1 = emit_fetch( bld, inst, 1, i );
894 LLVMValueRef src2 = emit_fetch( bld, inst, 2, i );
895 ddx[i] = LLVMBuildExtractElement(bld->base.builder, src1, index0, "");
896 ddy[i] = LLVMBuildExtractElement(bld->base.builder, src2, index0, "");
897 }
898 unit = inst->Src[3].Register.Index;
899 } else {
900 for (i = 0; i < num_coords; i++) {
901 ddx[i] = lp_build_scalar_ddx( &bld->base, coords[i] );
902 ddy[i] = lp_build_scalar_ddy( &bld->base, coords[i] );
903 }
904 unit = inst->Src[1].Register.Index;
905 }
906 for (i = num_coords; i < 3; i++) {
907 ddx[i] = LLVMGetUndef(bld->base.elem_type);
908 ddy[i] = LLVMGetUndef(bld->base.elem_type);
909 }
910
911 bld->sampler->emit_fetch_texel(bld->sampler,
912 bld->base.builder,
913 bld->base.type,
914 unit, num_coords, coords,
915 ddx, ddy,
916 lod_bias, explicit_lod,
917 texel);
918 }
919
920 static boolean
921 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
922 int pc)
923 {
924 int i;
925
926 for (i = 0; i < 5; i++) {
927 unsigned opcode;
928
929 if (pc + i >= bld->info->num_instructions)
930 return TRUE;
931
932 opcode = bld->instructions[pc + i].Instruction.Opcode;
933
934 if (opcode == TGSI_OPCODE_END)
935 return TRUE;
936
937 if (opcode == TGSI_OPCODE_TEX ||
938 opcode == TGSI_OPCODE_TXP ||
939 opcode == TGSI_OPCODE_TXD ||
940 opcode == TGSI_OPCODE_TXB ||
941 opcode == TGSI_OPCODE_TXL ||
942 opcode == TGSI_OPCODE_TXF ||
943 opcode == TGSI_OPCODE_TXQ ||
944 opcode == TGSI_OPCODE_CAL ||
945 opcode == TGSI_OPCODE_CALLNZ ||
946 opcode == TGSI_OPCODE_IF ||
947 opcode == TGSI_OPCODE_IFC ||
948 opcode == TGSI_OPCODE_BGNLOOP ||
949 opcode == TGSI_OPCODE_SWITCH)
950 return FALSE;
951 }
952
953 return TRUE;
954 }
955
956
957
958 /**
959 * Kill fragment if any of the src register values are negative.
960 */
961 static void
962 emit_kil(
963 struct lp_build_tgsi_soa_context *bld,
964 const struct tgsi_full_instruction *inst,
965 int pc)
966 {
967 const struct tgsi_full_src_register *reg = &inst->Src[0];
968 LLVMValueRef terms[NUM_CHANNELS];
969 LLVMValueRef mask;
970 unsigned chan_index;
971
972 memset(&terms, 0, sizeof terms);
973
974 FOR_EACH_CHANNEL( chan_index ) {
975 unsigned swizzle;
976
977 /* Unswizzle channel */
978 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
979
980 /* Check if the component has not been already tested. */
981 assert(swizzle < NUM_CHANNELS);
982 if( !terms[swizzle] )
983 /* TODO: change the comparison operator instead of setting the sign */
984 terms[swizzle] = emit_fetch(bld, inst, 0, chan_index );
985 }
986
987 mask = NULL;
988 FOR_EACH_CHANNEL( chan_index ) {
989 if(terms[chan_index]) {
990 LLVMValueRef chan_mask;
991
992 /*
993 * If term < 0 then mask = 0 else mask = ~0.
994 */
995 chan_mask = lp_build_cmp(&bld->base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->base.zero);
996
997 if(mask)
998 mask = LLVMBuildAnd(bld->base.builder, mask, chan_mask, "");
999 else
1000 mask = chan_mask;
1001 }
1002 }
1003
1004 if(mask) {
1005 lp_build_mask_update(bld->mask, mask);
1006
1007 if (!near_end_of_shader(bld, pc))
1008 lp_build_mask_check(bld->mask);
1009 }
1010 }
1011
1012
1013 /**
1014 * Predicated fragment kill.
1015 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1016 * The only predication is the execution mask which will apply if
1017 * we're inside a loop or conditional.
1018 */
1019 static void
1020 emit_kilp(struct lp_build_tgsi_soa_context *bld,
1021 const struct tgsi_full_instruction *inst,
1022 int pc)
1023 {
1024 LLVMValueRef mask;
1025
1026 /* For those channels which are "alive", disable fragment shader
1027 * execution.
1028 */
1029 if (bld->exec_mask.has_mask) {
1030 mask = LLVMBuildNot(bld->base.builder, bld->exec_mask.exec_mask, "kilp");
1031 }
1032 else {
1033 LLVMValueRef zero = LLVMConstNull(bld->base.int_vec_type);
1034 mask = zero;
1035 }
1036
1037 lp_build_mask_update(bld->mask, mask);
1038
1039 if (!near_end_of_shader(bld, pc))
1040 lp_build_mask_check(bld->mask);
1041 }
1042
1043 static void
1044 emit_declaration(
1045 struct lp_build_tgsi_soa_context *bld,
1046 const struct tgsi_full_declaration *decl)
1047 {
1048 LLVMTypeRef vec_type = bld->base.vec_type;
1049
1050 unsigned first = decl->Range.First;
1051 unsigned last = decl->Range.Last;
1052 unsigned idx, i;
1053
1054 for (idx = first; idx <= last; ++idx) {
1055 assert(last <= bld->info->file_max[decl->Declaration.File]);
1056 switch (decl->Declaration.File) {
1057 case TGSI_FILE_TEMPORARY:
1058 assert(idx < LP_MAX_TGSI_TEMPS);
1059 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
1060 LLVMValueRef array_size = LLVMConstInt(LLVMInt32Type(),
1061 last*4 + 4, 0);
1062 bld->temps_array = lp_build_array_alloca(bld->base.builder,
1063 vec_type, array_size, "");
1064 } else {
1065 for (i = 0; i < NUM_CHANNELS; i++)
1066 bld->temps[idx][i] = lp_build_alloca(bld->base.builder,
1067 vec_type, "");
1068 }
1069 break;
1070
1071 case TGSI_FILE_OUTPUT:
1072 for (i = 0; i < NUM_CHANNELS; i++)
1073 bld->outputs[idx][i] = lp_build_alloca(bld->base.builder,
1074 vec_type, "");
1075 break;
1076
1077 case TGSI_FILE_ADDRESS:
1078 assert(idx < LP_MAX_TGSI_ADDRS);
1079 for (i = 0; i < NUM_CHANNELS; i++)
1080 bld->addr[idx][i] = lp_build_alloca(bld->base.builder,
1081 vec_type, "");
1082 break;
1083
1084 case TGSI_FILE_PREDICATE:
1085 assert(idx < LP_MAX_TGSI_PREDS);
1086 for (i = 0; i < NUM_CHANNELS; i++)
1087 bld->preds[idx][i] = lp_build_alloca(bld->base.builder,
1088 vec_type, "");
1089 break;
1090
1091 default:
1092 /* don't need to declare other vars */
1093 break;
1094 }
1095 }
1096 }
1097
1098
1099 /**
1100 * Emit LLVM for one TGSI instruction.
1101 * \param return TRUE for success, FALSE otherwise
1102 */
1103 static boolean
1104 emit_instruction(
1105 struct lp_build_tgsi_soa_context *bld,
1106 const struct tgsi_full_instruction *inst,
1107 const struct tgsi_opcode_info *info,
1108 int *pc)
1109 {
1110 unsigned chan_index;
1111 LLVMValueRef src0, src1, src2;
1112 LLVMValueRef tmp0, tmp1, tmp2;
1113 LLVMValueRef tmp3 = NULL;
1114 LLVMValueRef tmp4 = NULL;
1115 LLVMValueRef tmp5 = NULL;
1116 LLVMValueRef tmp6 = NULL;
1117 LLVMValueRef tmp7 = NULL;
1118 LLVMValueRef res;
1119 LLVMValueRef dst0[NUM_CHANNELS];
1120
1121 /*
1122 * Stores and write masks are handled in a general fashion after the long
1123 * instruction opcode switch statement.
1124 *
1125 * Although not stricitly necessary, we avoid generating instructions for
1126 * channels which won't be stored, in cases where's that easy. For some
1127 * complex instructions, like texture sampling, it is more convenient to
1128 * assume a full writemask and then let LLVM optimization passes eliminate
1129 * redundant code.
1130 */
1131
1132 (*pc)++;
1133
1134 assert(info->num_dst <= 1);
1135 if (info->num_dst) {
1136 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1137 dst0[chan_index] = bld->base.undef;
1138 }
1139 }
1140
1141 switch (inst->Instruction.Opcode) {
1142 case TGSI_OPCODE_ARL:
1143 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1144 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1145 tmp0 = lp_build_floor(&bld->base, tmp0);
1146 dst0[chan_index] = tmp0;
1147 }
1148 break;
1149
1150 case TGSI_OPCODE_MOV:
1151 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1152 dst0[chan_index] = emit_fetch( bld, inst, 0, chan_index );
1153 }
1154 break;
1155
1156 case TGSI_OPCODE_LIT:
1157 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ) {
1158 dst0[CHAN_X] = bld->base.one;
1159 }
1160 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1161 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1162 dst0[CHAN_Y] = lp_build_max( &bld->base, src0, bld->base.zero);
1163 }
1164 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1165 /* XMM[1] = SrcReg[0].yyyy */
1166 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1167 /* XMM[1] = max(XMM[1], 0) */
1168 tmp1 = lp_build_max( &bld->base, tmp1, bld->base.zero);
1169 /* XMM[2] = SrcReg[0].wwww */
1170 tmp2 = emit_fetch( bld, inst, 0, CHAN_W );
1171 tmp1 = lp_build_pow( &bld->base, tmp1, tmp2);
1172 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1173 tmp2 = lp_build_cmp(&bld->base, PIPE_FUNC_GREATER, tmp0, bld->base.zero);
1174 dst0[CHAN_Z] = lp_build_select(&bld->base, tmp2, tmp1, bld->base.zero);
1175 }
1176 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) ) {
1177 dst0[CHAN_W] = bld->base.one;
1178 }
1179 break;
1180
1181 case TGSI_OPCODE_RCP:
1182 /* TGSI_OPCODE_RECIP */
1183 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1184 res = lp_build_rcp(&bld->base, src0);
1185 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1186 dst0[chan_index] = res;
1187 }
1188 break;
1189
1190 case TGSI_OPCODE_RSQ:
1191 /* TGSI_OPCODE_RECIPSQRT */
1192 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1193 src0 = lp_build_abs(&bld->base, src0);
1194 res = lp_build_rsqrt(&bld->base, src0);
1195 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1196 dst0[chan_index] = res;
1197 }
1198 break;
1199
1200 case TGSI_OPCODE_EXP:
1201 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1202 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1203 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1204 LLVMValueRef *p_exp2_int_part = NULL;
1205 LLVMValueRef *p_frac_part = NULL;
1206 LLVMValueRef *p_exp2 = NULL;
1207
1208 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1209
1210 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1211 p_exp2_int_part = &tmp0;
1212 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1213 p_frac_part = &tmp1;
1214 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1215 p_exp2 = &tmp2;
1216
1217 lp_build_exp2_approx(&bld->base, src0, p_exp2_int_part, p_frac_part, p_exp2);
1218
1219 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1220 dst0[CHAN_X] = tmp0;
1221 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1222 dst0[CHAN_Y] = tmp1;
1223 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1224 dst0[CHAN_Z] = tmp2;
1225 }
1226 /* dst.w = 1.0 */
1227 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1228 dst0[CHAN_W] = bld->base.one;
1229 }
1230 break;
1231
1232 case TGSI_OPCODE_LOG:
1233 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1234 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1235 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1236 LLVMValueRef *p_floor_log2 = NULL;
1237 LLVMValueRef *p_exp = NULL;
1238 LLVMValueRef *p_log2 = NULL;
1239
1240 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1241 src0 = lp_build_abs( &bld->base, src0 );
1242
1243 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1244 p_floor_log2 = &tmp0;
1245 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1246 p_exp = &tmp1;
1247 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1248 p_log2 = &tmp2;
1249
1250 lp_build_log2_approx(&bld->base, src0, p_exp, p_floor_log2, p_log2);
1251
1252 /* dst.x = floor(lg2(abs(src.x))) */
1253 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1254 dst0[CHAN_X] = tmp0;
1255 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1256 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y )) {
1257 dst0[CHAN_Y] = lp_build_div( &bld->base, src0, tmp1);
1258 }
1259 /* dst.z = lg2(abs(src.x)) */
1260 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1261 dst0[CHAN_Z] = tmp2;
1262 }
1263 /* dst.w = 1.0 */
1264 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1265 dst0[CHAN_W] = bld->base.one;
1266 }
1267 break;
1268
1269 case TGSI_OPCODE_MUL:
1270 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1271 src0 = emit_fetch( bld, inst, 0, chan_index );
1272 src1 = emit_fetch( bld, inst, 1, chan_index );
1273 dst0[chan_index] = lp_build_mul(&bld->base, src0, src1);
1274 }
1275 break;
1276
1277 case TGSI_OPCODE_ADD:
1278 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1279 src0 = emit_fetch( bld, inst, 0, chan_index );
1280 src1 = emit_fetch( bld, inst, 1, chan_index );
1281 dst0[chan_index] = lp_build_add(&bld->base, src0, src1);
1282 }
1283 break;
1284
1285 case TGSI_OPCODE_DP3:
1286 /* TGSI_OPCODE_DOT3 */
1287 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1288 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1289 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1290 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1291 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1292 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1293 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1294 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1295 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1296 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1297 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1298 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1299 dst0[chan_index] = tmp0;
1300 }
1301 break;
1302
1303 case TGSI_OPCODE_DP4:
1304 /* TGSI_OPCODE_DOT4 */
1305 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1306 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1307 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1308 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1309 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1310 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1311 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1312 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1313 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1314 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1315 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1316 tmp1 = emit_fetch( bld, inst, 0, CHAN_W );
1317 tmp2 = emit_fetch( bld, inst, 1, CHAN_W );
1318 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1319 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1320 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1321 dst0[chan_index] = tmp0;
1322 }
1323 break;
1324
1325 case TGSI_OPCODE_DST:
1326 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1327 dst0[CHAN_X] = bld->base.one;
1328 }
1329 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1330 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1331 tmp1 = emit_fetch( bld, inst, 1, CHAN_Y );
1332 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp0, tmp1);
1333 }
1334 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1335 dst0[CHAN_Z] = emit_fetch( bld, inst, 0, CHAN_Z );
1336 }
1337 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1338 dst0[CHAN_W] = emit_fetch( bld, inst, 1, CHAN_W );
1339 }
1340 break;
1341
1342 case TGSI_OPCODE_MIN:
1343 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1344 src0 = emit_fetch( bld, inst, 0, chan_index );
1345 src1 = emit_fetch( bld, inst, 1, chan_index );
1346 dst0[chan_index] = lp_build_min( &bld->base, src0, src1 );
1347 }
1348 break;
1349
1350 case TGSI_OPCODE_MAX:
1351 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1352 src0 = emit_fetch( bld, inst, 0, chan_index );
1353 src1 = emit_fetch( bld, inst, 1, chan_index );
1354 dst0[chan_index] = lp_build_max( &bld->base, src0, src1 );
1355 }
1356 break;
1357
1358 case TGSI_OPCODE_SLT:
1359 /* TGSI_OPCODE_SETLT */
1360 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1361 src0 = emit_fetch( bld, inst, 0, chan_index );
1362 src1 = emit_fetch( bld, inst, 1, chan_index );
1363 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, src1 );
1364 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1365 }
1366 break;
1367
1368 case TGSI_OPCODE_SGE:
1369 /* TGSI_OPCODE_SETGE */
1370 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1371 src0 = emit_fetch( bld, inst, 0, chan_index );
1372 src1 = emit_fetch( bld, inst, 1, chan_index );
1373 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GEQUAL, src0, src1 );
1374 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1375 }
1376 break;
1377
1378 case TGSI_OPCODE_MAD:
1379 /* TGSI_OPCODE_MADD */
1380 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1381 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1382 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1383 tmp2 = emit_fetch( bld, inst, 2, chan_index );
1384 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1385 tmp0 = lp_build_add( &bld->base, tmp0, tmp2);
1386 dst0[chan_index] = tmp0;
1387 }
1388 break;
1389
1390 case TGSI_OPCODE_SUB:
1391 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1392 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1393 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1394 dst0[chan_index] = lp_build_sub( &bld->base, tmp0, tmp1);
1395 }
1396 break;
1397
1398 case TGSI_OPCODE_LRP:
1399 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1400 src0 = emit_fetch( bld, inst, 0, chan_index );
1401 src1 = emit_fetch( bld, inst, 1, chan_index );
1402 src2 = emit_fetch( bld, inst, 2, chan_index );
1403 tmp0 = lp_build_sub( &bld->base, src1, src2 );
1404 tmp0 = lp_build_mul( &bld->base, src0, tmp0 );
1405 dst0[chan_index] = lp_build_add( &bld->base, tmp0, src2 );
1406 }
1407 break;
1408
1409 case TGSI_OPCODE_CND:
1410 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1411 src0 = emit_fetch( bld, inst, 0, chan_index );
1412 src1 = emit_fetch( bld, inst, 1, chan_index );
1413 src2 = emit_fetch( bld, inst, 2, chan_index );
1414 tmp1 = lp_build_const_vec(bld->base.type, 0.5);
1415 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src2, tmp1);
1416 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src0, src1 );
1417 }
1418 break;
1419
1420 case TGSI_OPCODE_DP2A:
1421 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1422 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1423 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1424 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1425 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1426 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1427 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1428 tmp1 = emit_fetch( bld, inst, 2, CHAN_X ); /* xmm1 = src[2].x */
1429 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1430 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1431 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1432 }
1433 break;
1434
1435 case TGSI_OPCODE_FRC:
1436 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1437 src0 = emit_fetch( bld, inst, 0, chan_index );
1438 tmp0 = lp_build_floor(&bld->base, src0);
1439 tmp0 = lp_build_sub(&bld->base, src0, tmp0);
1440 dst0[chan_index] = tmp0;
1441 }
1442 break;
1443
1444 case TGSI_OPCODE_CLAMP:
1445 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1446 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1447 src1 = emit_fetch( bld, inst, 1, chan_index );
1448 src2 = emit_fetch( bld, inst, 2, chan_index );
1449 tmp0 = lp_build_max(&bld->base, tmp0, src1);
1450 tmp0 = lp_build_min(&bld->base, tmp0, src2);
1451 dst0[chan_index] = tmp0;
1452 }
1453 break;
1454
1455 case TGSI_OPCODE_FLR:
1456 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1457 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1458 dst0[chan_index] = lp_build_floor(&bld->base, tmp0);
1459 }
1460 break;
1461
1462 case TGSI_OPCODE_ROUND:
1463 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1464 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1465 dst0[chan_index] = lp_build_round(&bld->base, tmp0);
1466 }
1467 break;
1468
1469 case TGSI_OPCODE_EX2: {
1470 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1471 tmp0 = lp_build_exp2( &bld->base, tmp0);
1472 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1473 dst0[chan_index] = tmp0;
1474 }
1475 break;
1476 }
1477
1478 case TGSI_OPCODE_LG2:
1479 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1480 tmp0 = lp_build_log2( &bld->base, tmp0);
1481 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1482 dst0[chan_index] = tmp0;
1483 }
1484 break;
1485
1486 case TGSI_OPCODE_POW:
1487 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1488 src1 = emit_fetch( bld, inst, 1, CHAN_X );
1489 res = lp_build_pow( &bld->base, src0, src1 );
1490 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1491 dst0[chan_index] = res;
1492 }
1493 break;
1494
1495 case TGSI_OPCODE_XPD:
1496 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1497 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1498 tmp1 = emit_fetch( bld, inst, 1, CHAN_Z );
1499 tmp3 = emit_fetch( bld, inst, 0, CHAN_Z );
1500 }
1501 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1502 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1503 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1504 tmp4 = emit_fetch( bld, inst, 1, CHAN_Y );
1505 }
1506 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1507 tmp2 = tmp0;
1508 tmp2 = lp_build_mul( &bld->base, tmp2, tmp1);
1509 tmp5 = tmp3;
1510 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1511 tmp2 = lp_build_sub( &bld->base, tmp2, tmp5);
1512 dst0[CHAN_X] = tmp2;
1513 }
1514 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1515 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1516 tmp2 = emit_fetch( bld, inst, 1, CHAN_X );
1517 tmp5 = emit_fetch( bld, inst, 0, CHAN_X );
1518 }
1519 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1520 tmp3 = lp_build_mul( &bld->base, tmp3, tmp2);
1521 tmp1 = lp_build_mul( &bld->base, tmp1, tmp5);
1522 tmp3 = lp_build_sub( &bld->base, tmp3, tmp1);
1523 dst0[CHAN_Y] = tmp3;
1524 }
1525 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1526 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1527 tmp0 = lp_build_mul( &bld->base, tmp0, tmp2);
1528 tmp5 = lp_build_sub( &bld->base, tmp5, tmp0);
1529 dst0[CHAN_Z] = tmp5;
1530 }
1531 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1532 dst0[CHAN_W] = bld->base.one;
1533 }
1534 break;
1535
1536 case TGSI_OPCODE_ABS:
1537 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1538 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1539 dst0[chan_index] = lp_build_abs( &bld->base, tmp0 );
1540 }
1541 break;
1542
1543 case TGSI_OPCODE_RCC:
1544 /* deprecated? */
1545 assert(0);
1546 return FALSE;
1547
1548 case TGSI_OPCODE_DPH:
1549 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1550 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1551 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1552 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1553 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1554 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1555 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1556 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1557 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1558 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1559 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1560 tmp1 = emit_fetch( bld, inst, 1, CHAN_W );
1561 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1562 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1563 dst0[chan_index] = tmp0;
1564 }
1565 break;
1566
1567 case TGSI_OPCODE_COS:
1568 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1569 tmp0 = lp_build_cos( &bld->base, tmp0 );
1570 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1571 dst0[chan_index] = tmp0;
1572 }
1573 break;
1574
1575 case TGSI_OPCODE_DDX:
1576 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1577 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, &dst0[chan_index], NULL);
1578 }
1579 break;
1580
1581 case TGSI_OPCODE_DDY:
1582 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1583 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, NULL, &dst0[chan_index]);
1584 }
1585 break;
1586
1587 case TGSI_OPCODE_KILP:
1588 /* predicated kill */
1589 emit_kilp( bld, inst, (*pc)-1 );
1590 break;
1591
1592 case TGSI_OPCODE_KIL:
1593 /* conditional kill */
1594 emit_kil( bld, inst, (*pc)-1 );
1595 break;
1596
1597 case TGSI_OPCODE_PK2H:
1598 return FALSE;
1599 break;
1600
1601 case TGSI_OPCODE_PK2US:
1602 return FALSE;
1603 break;
1604
1605 case TGSI_OPCODE_PK4B:
1606 return FALSE;
1607 break;
1608
1609 case TGSI_OPCODE_PK4UB:
1610 return FALSE;
1611 break;
1612
1613 case TGSI_OPCODE_RFL:
1614 return FALSE;
1615 break;
1616
1617 case TGSI_OPCODE_SEQ:
1618 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1619 src0 = emit_fetch( bld, inst, 0, chan_index );
1620 src1 = emit_fetch( bld, inst, 1, chan_index );
1621 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_EQUAL, src0, src1 );
1622 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1623 }
1624 break;
1625
1626 case TGSI_OPCODE_SFL:
1627 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1628 dst0[chan_index] = bld->base.zero;
1629 }
1630 break;
1631
1632 case TGSI_OPCODE_SGT:
1633 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1634 src0 = emit_fetch( bld, inst, 0, chan_index );
1635 src1 = emit_fetch( bld, inst, 1, chan_index );
1636 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src0, src1 );
1637 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1638 }
1639 break;
1640
1641 case TGSI_OPCODE_SIN:
1642 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1643 tmp0 = lp_build_sin( &bld->base, tmp0 );
1644 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1645 dst0[chan_index] = tmp0;
1646 }
1647 break;
1648
1649 case TGSI_OPCODE_SLE:
1650 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1651 src0 = emit_fetch( bld, inst, 0, chan_index );
1652 src1 = emit_fetch( bld, inst, 1, chan_index );
1653 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LEQUAL, src0, src1 );
1654 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1655 }
1656 break;
1657
1658 case TGSI_OPCODE_SNE:
1659 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1660 src0 = emit_fetch( bld, inst, 0, chan_index );
1661 src1 = emit_fetch( bld, inst, 1, chan_index );
1662 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_NOTEQUAL, src0, src1 );
1663 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1664 }
1665 break;
1666
1667 case TGSI_OPCODE_STR:
1668 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1669 dst0[chan_index] = bld->base.one;
1670 }
1671 break;
1672
1673 case TGSI_OPCODE_TEX:
1674 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_NONE, dst0 );
1675 break;
1676
1677 case TGSI_OPCODE_TXD:
1678 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV, dst0 );
1679 break;
1680
1681 case TGSI_OPCODE_UP2H:
1682 /* deprecated */
1683 assert (0);
1684 return FALSE;
1685 break;
1686
1687 case TGSI_OPCODE_UP2US:
1688 /* deprecated */
1689 assert(0);
1690 return FALSE;
1691 break;
1692
1693 case TGSI_OPCODE_UP4B:
1694 /* deprecated */
1695 assert(0);
1696 return FALSE;
1697 break;
1698
1699 case TGSI_OPCODE_UP4UB:
1700 /* deprecated */
1701 assert(0);
1702 return FALSE;
1703 break;
1704
1705 case TGSI_OPCODE_X2D:
1706 /* deprecated? */
1707 assert(0);
1708 return FALSE;
1709 break;
1710
1711 case TGSI_OPCODE_ARA:
1712 /* deprecated */
1713 assert(0);
1714 return FALSE;
1715 break;
1716
1717 case TGSI_OPCODE_ARR:
1718 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1719 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1720 tmp0 = lp_build_round(&bld->base, tmp0);
1721 dst0[chan_index] = tmp0;
1722 }
1723 break;
1724
1725 case TGSI_OPCODE_BRA:
1726 /* deprecated */
1727 assert(0);
1728 return FALSE;
1729 break;
1730
1731 case TGSI_OPCODE_CAL:
1732 lp_exec_mask_call(&bld->exec_mask,
1733 inst->Label.Label,
1734 pc);
1735
1736 break;
1737
1738 case TGSI_OPCODE_RET:
1739 lp_exec_mask_ret(&bld->exec_mask, pc);
1740 break;
1741
1742 case TGSI_OPCODE_END:
1743 *pc = -1;
1744 break;
1745
1746 case TGSI_OPCODE_SSG:
1747 /* TGSI_OPCODE_SGN */
1748 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1749 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1750 dst0[chan_index] = lp_build_sgn( &bld->base, tmp0 );
1751 }
1752 break;
1753
1754 case TGSI_OPCODE_CMP:
1755 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1756 src0 = emit_fetch( bld, inst, 0, chan_index );
1757 src1 = emit_fetch( bld, inst, 1, chan_index );
1758 src2 = emit_fetch( bld, inst, 2, chan_index );
1759 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, bld->base.zero );
1760 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src1, src2);
1761 }
1762 break;
1763
1764 case TGSI_OPCODE_SCS:
1765 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1766 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1767 dst0[CHAN_X] = lp_build_cos( &bld->base, tmp0 );
1768 }
1769 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1770 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1771 dst0[CHAN_Y] = lp_build_sin( &bld->base, tmp0 );
1772 }
1773 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1774 dst0[CHAN_Z] = bld->base.zero;
1775 }
1776 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1777 dst0[CHAN_W] = bld->base.one;
1778 }
1779 break;
1780
1781 case TGSI_OPCODE_TXB:
1782 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_LOD_BIAS, dst0 );
1783 break;
1784
1785 case TGSI_OPCODE_NRM:
1786 /* fall-through */
1787 case TGSI_OPCODE_NRM4:
1788 /* 3 or 4-component normalization */
1789 {
1790 uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
1791
1792 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) ||
1793 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y) ||
1794 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z) ||
1795 (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4)) {
1796
1797 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1798
1799 /* xmm4 = src.x */
1800 /* xmm0 = src.x * src.x */
1801 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1802 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1803 tmp4 = tmp0;
1804 }
1805 tmp0 = lp_build_mul( &bld->base, tmp0, tmp0);
1806
1807 /* xmm5 = src.y */
1808 /* xmm0 = xmm0 + src.y * src.y */
1809 tmp1 = emit_fetch(bld, inst, 0, CHAN_Y);
1810 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1811 tmp5 = tmp1;
1812 }
1813 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1814 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1815
1816 /* xmm6 = src.z */
1817 /* xmm0 = xmm0 + src.z * src.z */
1818 tmp1 = emit_fetch(bld, inst, 0, CHAN_Z);
1819 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1820 tmp6 = tmp1;
1821 }
1822 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1823 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1824
1825 if (dims == 4) {
1826 /* xmm7 = src.w */
1827 /* xmm0 = xmm0 + src.w * src.w */
1828 tmp1 = emit_fetch(bld, inst, 0, CHAN_W);
1829 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W)) {
1830 tmp7 = tmp1;
1831 }
1832 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1833 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1834 }
1835
1836 /* xmm1 = 1 / sqrt(xmm0) */
1837 tmp1 = lp_build_rsqrt( &bld->base, tmp0);
1838
1839 /* dst.x = xmm1 * src.x */
1840 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1841 dst0[CHAN_X] = lp_build_mul( &bld->base, tmp4, tmp1);
1842 }
1843
1844 /* dst.y = xmm1 * src.y */
1845 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1846 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp5, tmp1);
1847 }
1848
1849 /* dst.z = xmm1 * src.z */
1850 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1851 dst0[CHAN_Z] = lp_build_mul( &bld->base, tmp6, tmp1);
1852 }
1853
1854 /* dst.w = xmm1 * src.w */
1855 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) && dims == 4) {
1856 dst0[CHAN_W] = lp_build_mul( &bld->base, tmp7, tmp1);
1857 }
1858 }
1859
1860 /* dst.w = 1.0 */
1861 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 3) {
1862 dst0[CHAN_W] = bld->base.one;
1863 }
1864 }
1865 break;
1866
1867 case TGSI_OPCODE_DIV:
1868 /* deprecated */
1869 assert( 0 );
1870 return FALSE;
1871 break;
1872
1873 case TGSI_OPCODE_DP2:
1874 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1875 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1876 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1877 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1878 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1879 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1880 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1881 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1882 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1883 }
1884 break;
1885
1886 case TGSI_OPCODE_TXL:
1887 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD, dst0 );
1888 break;
1889
1890 case TGSI_OPCODE_TXP:
1891 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_PROJECTED, dst0 );
1892 break;
1893
1894 case TGSI_OPCODE_BRK:
1895 lp_exec_break(&bld->exec_mask);
1896 break;
1897
1898 case TGSI_OPCODE_IF:
1899 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1900 tmp0 = lp_build_cmp(&bld->base, PIPE_FUNC_NOTEQUAL,
1901 tmp0, bld->base.zero);
1902 lp_exec_mask_cond_push(&bld->exec_mask, tmp0);
1903 break;
1904
1905 case TGSI_OPCODE_BGNLOOP:
1906 lp_exec_bgnloop(&bld->exec_mask);
1907 break;
1908
1909 case TGSI_OPCODE_BGNSUB:
1910 lp_exec_mask_bgnsub(&bld->exec_mask);
1911 break;
1912
1913 case TGSI_OPCODE_ELSE:
1914 lp_exec_mask_cond_invert(&bld->exec_mask);
1915 break;
1916
1917 case TGSI_OPCODE_ENDIF:
1918 lp_exec_mask_cond_pop(&bld->exec_mask);
1919 break;
1920
1921 case TGSI_OPCODE_ENDLOOP:
1922 lp_exec_endloop(&bld->exec_mask);
1923 break;
1924
1925 case TGSI_OPCODE_ENDSUB:
1926 lp_exec_mask_endsub(&bld->exec_mask, pc);
1927 break;
1928
1929 case TGSI_OPCODE_PUSHA:
1930 /* deprecated? */
1931 assert(0);
1932 return FALSE;
1933 break;
1934
1935 case TGSI_OPCODE_POPA:
1936 /* deprecated? */
1937 assert(0);
1938 return FALSE;
1939 break;
1940
1941 case TGSI_OPCODE_CEIL:
1942 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1943 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1944 dst0[chan_index] = lp_build_ceil(&bld->base, tmp0);
1945 }
1946 break;
1947
1948 case TGSI_OPCODE_I2F:
1949 /* deprecated? */
1950 assert(0);
1951 return FALSE;
1952 break;
1953
1954 case TGSI_OPCODE_NOT:
1955 /* deprecated? */
1956 assert(0);
1957 return FALSE;
1958 break;
1959
1960 case TGSI_OPCODE_TRUNC:
1961 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1962 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1963 dst0[chan_index] = lp_build_trunc(&bld->base, tmp0);
1964 }
1965 break;
1966
1967 case TGSI_OPCODE_SHL:
1968 /* deprecated? */
1969 assert(0);
1970 return FALSE;
1971 break;
1972
1973 case TGSI_OPCODE_ISHR:
1974 /* deprecated? */
1975 assert(0);
1976 return FALSE;
1977 break;
1978
1979 case TGSI_OPCODE_AND:
1980 /* deprecated? */
1981 assert(0);
1982 return FALSE;
1983 break;
1984
1985 case TGSI_OPCODE_OR:
1986 /* deprecated? */
1987 assert(0);
1988 return FALSE;
1989 break;
1990
1991 case TGSI_OPCODE_MOD:
1992 /* deprecated? */
1993 assert(0);
1994 return FALSE;
1995 break;
1996
1997 case TGSI_OPCODE_XOR:
1998 /* deprecated? */
1999 assert(0);
2000 return FALSE;
2001 break;
2002
2003 case TGSI_OPCODE_SAD:
2004 /* deprecated? */
2005 assert(0);
2006 return FALSE;
2007 break;
2008
2009 case TGSI_OPCODE_TXF:
2010 /* deprecated? */
2011 assert(0);
2012 return FALSE;
2013 break;
2014
2015 case TGSI_OPCODE_TXQ:
2016 /* deprecated? */
2017 assert(0);
2018 return FALSE;
2019 break;
2020
2021 case TGSI_OPCODE_CONT:
2022 lp_exec_continue(&bld->exec_mask);
2023 break;
2024
2025 case TGSI_OPCODE_EMIT:
2026 return FALSE;
2027 break;
2028
2029 case TGSI_OPCODE_ENDPRIM:
2030 return FALSE;
2031 break;
2032
2033 case TGSI_OPCODE_NOP:
2034 break;
2035
2036 default:
2037 return FALSE;
2038 }
2039
2040 if(info->num_dst) {
2041 LLVMValueRef pred[NUM_CHANNELS];
2042
2043 emit_fetch_predicate( bld, inst, pred );
2044
2045 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2046 emit_store( bld, inst, 0, chan_index, pred[chan_index], dst0[chan_index]);
2047 }
2048 }
2049
2050 return TRUE;
2051 }
2052
2053
/**
 * Translate an entire TGSI shader into LLVM IR, SoA-style.
 *
 * Makes two passes over the token stream: a parse pass that handles
 * declarations and immediates and buffers the instructions, then an
 * emission pass that walks the buffered instructions by program
 * counter (so CAL/RET/END can redirect execution).
 *
 * \param builder     LLVM IR builder to emit into
 * \param tokens      TGSI token stream for the shader
 * \param type        vector type for shader execution (SoA lanes)
 * \param mask        execution mask context (may gate stores)
 * \param consts_ptr  pointer to the constant buffer
 * \param pos         fragment position values, indexed by channel
 * \param inputs      interpolated input values, [attrib][channel]
 * \param outputs     output storage to be written, [attrib][channel]
 * \param sampler     texture sampling code generator
 * \param info        shader info (register usage, indirect addressing)
 */
void
lp_build_tgsi_soa(LLVMBuilderRef builder,
                  const struct tgsi_token *tokens,
                  struct lp_type type,
                  struct lp_build_mask_context *mask,
                  LLVMValueRef consts_ptr,
                  const LLVMValueRef *pos,
                  const LLVMValueRef (*inputs)[NUM_CHANNELS],
                  LLVMValueRef (*outputs)[NUM_CHANNELS],
                  struct lp_build_sampler_soa *sampler,
                  const struct tgsi_shader_info *info)
{
   struct lp_build_tgsi_soa_context bld;
   struct tgsi_parse_context parse;
   uint num_immediates = 0;
   uint num_instructions = 0;
   unsigned i;
   int pc = 0;

   /* Signed integer type matching the execution vector's width/length.
    * NOTE(review): res_type is set up but not referenced below — confirm
    * whether it is leftover or consumed by code outside this view. */
   struct lp_type res_type;

   assert(type.length <= LP_MAX_VECTOR_LENGTH);
   memset(&res_type, 0, sizeof res_type);
   res_type.width = type.width;
   res_type.length = type.length;
   res_type.sign = 1;

   /* Setup build context */
   memset(&bld, 0, sizeof bld);
   lp_build_context_init(&bld.base, builder, type);
   lp_build_context_init(&bld.uint_bld, builder, lp_uint_type(type));
   bld.mask = mask;
   bld.pos = pos;
   bld.inputs = inputs;
   bld.outputs = outputs;
   bld.consts_ptr = consts_ptr;
   bld.sampler = sampler;
   bld.info = info;
   bld.indirect_files = info->indirect_files;
   /* Instructions are buffered (not emitted during parsing) so the
    * emission loop below can jump around for subroutine calls. */
   bld.instructions = (struct tgsi_full_instruction *)
                      MALLOC( LP_MAX_INSTRUCTIONS * sizeof(struct tgsi_full_instruction) );
   bld.max_instructions = LP_MAX_INSTRUCTIONS;

   if (!bld.instructions) {
      return;
   }

   lp_exec_mask_init(&bld.exec_mask, &bld.base);

   tgsi_parse_init( &parse, tokens );

   /* Pass 1: walk the token stream, handling declarations/immediates
    * immediately and buffering instructions for pass 2. */
   while( !tgsi_parse_end_of_tokens( &parse ) ) {
      tgsi_parse_token( &parse );

      switch( parse.FullToken.Token.Type ) {
      case TGSI_TOKEN_TYPE_DECLARATION:
         /* Inputs already interpolated */
         emit_declaration( &bld, &parse.FullToken.FullDeclaration );
         break;

      case TGSI_TOKEN_TYPE_INSTRUCTION:
         {
            /* save expanded instruction */
            if (num_instructions == bld.max_instructions) {
               /* Grow the buffer by another LP_MAX_INSTRUCTIONS chunk.
                * On REALLOC failure the instruction is silently dropped
                * (the old buffer remains valid and is still freed). */
               struct tgsi_full_instruction *instructions;
               instructions = REALLOC(bld.instructions,
                                      bld.max_instructions
                                      * sizeof(struct tgsi_full_instruction),
                                      (bld.max_instructions + LP_MAX_INSTRUCTIONS)
                                      * sizeof(struct tgsi_full_instruction));
               if (!instructions) {
                  break;
               }
               bld.instructions = instructions;
               bld.max_instructions += LP_MAX_INSTRUCTIONS;
            }

            memcpy(bld.instructions + num_instructions,
                   &parse.FullToken.FullInstruction,
                   sizeof(bld.instructions[0]));

            num_instructions++;
         }

         break;

      case TGSI_TOKEN_TYPE_IMMEDIATE:
         /* simply copy the immediate values into the next immediates[] slot */
         {
            const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
            assert(size <= 4);
            assert(num_immediates < LP_MAX_TGSI_IMMEDIATES);
            for( i = 0; i < size; ++i )
               bld.immediates[num_immediates][i] =
                  lp_build_const_vec(type, parse.FullToken.FullImmediate.u[i].Float);
            /* pad unused components so fetches are never undefined reads */
            for( i = size; i < 4; ++i )
               bld.immediates[num_immediates][i] = bld.base.undef;
            num_immediates++;
         }
         break;

      case TGSI_TOKEN_TYPE_PROPERTY:
         break;

      default:
         assert( 0 );
      }
   }

   /* Pass 2: emit LLVM IR for the buffered instructions.  pc is advanced
    * by emit_instruction(); END sets it to -1 to terminate the loop. */
   while (pc != -1) {
      struct tgsi_full_instruction *instr = bld.instructions + pc;
      const struct tgsi_opcode_info *opcode_info =
         tgsi_get_opcode_info(instr->Instruction.Opcode);
      if (!emit_instruction( &bld, instr, opcode_info, &pc ))
         _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
                       opcode_info->mnemonic);
   }

   /* Debug aid: flip to 1 to dump the TGSI and generated LLVM function. */
   if (0) {
      LLVMBasicBlockRef block = LLVMGetInsertBlock(builder);
      LLVMValueRef function = LLVMGetBasicBlockParent(block);
      debug_printf("11111111111111111111111111111 \n");
      tgsi_dump(tokens, 0);
      lp_debug_dump_value(function);
      debug_printf("2222222222222222222222222222 \n");
   }
   tgsi_parse_free( &parse );

   /* Debug aid: flip to 1 to dump the whole enclosing LLVM module. */
   if (0) {
      LLVMModuleRef module = LLVMGetGlobalParent(
         LLVMGetBasicBlockParent(LLVMGetInsertBlock(bld.base.builder)));
      LLVMDumpModule(module);

   }

   FREE( bld.instructions );
}
2191