gallivm: added comment
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_tgsi_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_scan.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_gather.h"
53 #include "lp_bld_logic.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_quad.h"
57 #include "lp_bld_tgsi.h"
58 #include "lp_bld_limits.h"
59 #include "lp_bld_debug.h"
60
61
/* Iterate CHAN over the four register channels (x, y, z, w). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* Test whether channel CHAN is enabled in dst register 0's writemask. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate CHAN over only the channels enabled in dst register 0's writemask. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
      IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3
#define NUM_CHANNELS 4

#define LP_MAX_INSTRUCTIONS 256
83
/**
 * Runtime execution-mask state used to implement TGSI control flow
 * (IF/ELSE, loops, subroutine calls) with SoA conditionals.
 * The component masks are integer vectors holding ~0 (active) or 0
 * (inactive) per channel.
 */
struct lp_exec_mask {
   struct lp_build_context *bld;

   boolean has_mask;            /* TRUE when any control flow is active */

   LLVMTypeRef int_vec_type;    /* type of all the mask vectors below */

   /* IF/ELSE nesting: stack of saved conditional masks */
   LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
   int cond_stack_size;
   LLVMValueRef cond_mask;

   /* innermost loop state; outer loops are saved in loop_stack */
   LLVMBasicBlockRef loop_block;
   LLVMValueRef cont_mask;
   LLVMValueRef break_mask;
   LLVMValueRef break_var;      /* alloca carrying break_mask across iterations */
   struct {
      LLVMBasicBlockRef loop_block;
      LLVMValueRef cont_mask;
      LLVMValueRef break_mask;
      LLVMValueRef break_var;
   } loop_stack[LP_MAX_TGSI_NESTING];
   int loop_stack_size;

   /* subroutine call state */
   LLVMValueRef ret_mask;
   struct {
      int pc;                   /* instruction index to return to */
      LLVMValueRef ret_mask;
   } call_stack[LP_MAX_TGSI_NESTING];
   int call_stack_size;

   LLVMValueRef exec_mask;      /* AND-combination of the masks above */
};
116
/**
 * Context carried through the SoA TGSI -> LLVM IR translation.
 */
struct lp_build_tgsi_soa_context
{
   struct lp_build_context base;     /* builder for float vectors */

   /* Builder for integer masks and indices */
   struct lp_build_context int_bld;

   LLVMValueRef consts_ptr;          /* pointer into the constant buffer */
   const LLVMValueRef *pos;
   const LLVMValueRef (*inputs)[NUM_CHANNELS];
   LLVMValueRef (*outputs)[NUM_CHANNELS];  /* allocas, written via exec mask */

   const struct lp_build_sampler_soa *sampler;

   LLVMValueRef immediates[LP_MAX_TGSI_IMMEDIATES][NUM_CHANNELS];
   LLVMValueRef temps[LP_MAX_TGSI_TEMPS][NUM_CHANNELS];  /* per-channel allocas */
   LLVMValueRef addr[LP_MAX_TGSI_ADDRS][NUM_CHANNELS];   /* per-channel allocas */
   LLVMValueRef preds[LP_MAX_TGSI_PREDS][NUM_CHANNELS];  /* per-channel allocas */

   /* we allocate an array of temps if we have indirect
    * addressing and then the temps above is unused */
   LLVMValueRef temps_array;
   boolean has_indirect_addressing;

   struct lp_build_mask_context *mask;  /* fragment live mask (KIL/KILP) */
   struct lp_exec_mask exec_mask;       /* control-flow execution mask */

   struct tgsi_full_instruction *instructions;
   uint max_instructions;
};
147
148 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
149 {
150 mask->bld = bld;
151 mask->has_mask = FALSE;
152 mask->cond_stack_size = 0;
153 mask->loop_stack_size = 0;
154 mask->call_stack_size = 0;
155
156 mask->int_vec_type = lp_build_int_vec_type(mask->bld->type);
157 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
158 LLVMConstAllOnes(mask->int_vec_type);
159 }
160
/**
 * Recompute exec_mask as the AND of the active conditional, loop
 * (continue/break) and call (return) masks, and refresh has_mask.
 */
static void lp_exec_mask_update(struct lp_exec_mask *mask)
{
   if (mask->loop_stack_size) {
      /*for loops we need to update the entire mask at runtime */
      LLVMValueRef tmp;
      assert(mask->break_mask);
      tmp = LLVMBuildAnd(mask->bld->builder,
                         mask->cont_mask,
                         mask->break_mask,
                         "maskcb");
      mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
                                     mask->cond_mask,
                                     tmp,
                                     "maskfull");
   } else
      mask->exec_mask = mask->cond_mask;

   if (mask->call_stack_size) {
      /* inside a subroutine: channels that already returned stay disabled */
      mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
                                     mask->exec_mask,
                                     mask->ret_mask,
                                     "callmask");
   }

   mask->has_mask = (mask->cond_stack_size > 0 ||
                     mask->loop_stack_size > 0 ||
                     mask->call_stack_size > 0);
}
189
190 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
191 LLVMValueRef val)
192 {
193 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
194 if (mask->cond_stack_size == 0) {
195 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
196 }
197 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
198 assert(LLVMTypeOf(val) == mask->int_vec_type);
199 mask->cond_mask = val;
200
201 lp_exec_mask_update(mask);
202 }
203
/**
 * ELSE: invert the current conditional mask, still constrained by the
 * enclosing (previously pushed) conditional mask.
 */
static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
{
   LLVMValueRef prev_mask;
   LLVMValueRef inv_mask;

   assert(mask->cond_stack_size);
   prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
   if (mask->cond_stack_size == 1) {
      assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
   }

   inv_mask = LLVMBuildNot(mask->bld->builder, mask->cond_mask, "");

   /* new mask = ~current & enclosing */
   mask->cond_mask = LLVMBuildAnd(mask->bld->builder,
                                  inv_mask,
                                  prev_mask, "");
   lp_exec_mask_update(mask);
}
222
223 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
224 {
225 assert(mask->cond_stack_size);
226 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
227 lp_exec_mask_update(mask);
228 }
229
/**
 * BGNLOOP: push the enclosing loop's state, allocate a fresh break
 * variable, and open a new basic block for the loop body.
 */
static void lp_exec_bgnloop(struct lp_exec_mask *mask)
{
   if (mask->loop_stack_size == 0) {
      assert(mask->loop_block == NULL);
      assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_var == NULL);
   }

   assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);

   /* save the enclosing loop's state */
   mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
   mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
   mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
   mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
   ++mask->loop_stack_size;

   /* the break mask must survive the loop back-edge, so keep it in memory */
   mask->break_var = lp_build_alloca(mask->bld->builder, mask->int_vec_type, "");
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   mask->loop_block = lp_build_insert_new_block(mask->bld->builder, "bgnloop");
   LLVMBuildBr(mask->bld->builder, mask->loop_block);
   LLVMPositionBuilderAtEnd(mask->bld->builder, mask->loop_block);

   /* reload break_mask at the top of each iteration */
   mask->break_mask = LLVMBuildLoad(mask->bld->builder, mask->break_var, "");

   lp_exec_mask_update(mask);
}
258
259 static void lp_exec_break(struct lp_exec_mask *mask)
260 {
261 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
262 mask->exec_mask,
263 "break");
264
265 mask->break_mask = LLVMBuildAnd(mask->bld->builder,
266 mask->break_mask,
267 exec_mask, "break_full");
268
269 lp_exec_mask_update(mask);
270 }
271
272 static void lp_exec_continue(struct lp_exec_mask *mask)
273 {
274 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
275 mask->exec_mask,
276 "");
277
278 mask->cont_mask = LLVMBuildAnd(mask->bld->builder,
279 mask->cont_mask,
280 exec_mask, "");
281
282 lp_exec_mask_update(mask);
283 }
284
285
/**
 * ENDLOOP: emit the conditional back-edge branch (loop again while any
 * channel is still active) and restore the enclosing loop's state.
 */
static void lp_exec_endloop(struct lp_exec_mask *mask)
{
   LLVMBasicBlockRef endloop;
   /* one integer wide enough to hold the whole exec-mask bit pattern */
   LLVMTypeRef reg_type = LLVMIntType(mask->bld->type.width*
                                      mask->bld->type.length);
   LLVMValueRef i1cond;

   assert(mask->break_mask);

   /*
    * Restore the cont_mask, but don't pop
    */
   assert(mask->loop_stack_size);
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
   lp_exec_mask_update(mask);

   /*
    * Unlike the continue mask, the break_mask must be preserved across loop
    * iterations
    */
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   /* i1cond = (mask != 0), i.e. keep looping while any channel is active */
   i1cond = LLVMBuildICmp(
      mask->bld->builder,
      LLVMIntNE,
      LLVMBuildBitCast(mask->bld->builder, mask->exec_mask, reg_type, ""),
      LLVMConstNull(reg_type), "");

   endloop = lp_build_insert_new_block(mask->bld->builder, "endloop");

   LLVMBuildCondBr(mask->bld->builder,
                   i1cond, mask->loop_block, endloop);

   LLVMPositionBuilderAtEnd(mask->bld->builder, endloop);

   /* pop the enclosing loop's state */
   assert(mask->loop_stack_size);
   --mask->loop_stack_size;
   mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
   mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
   mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;

   lp_exec_mask_update(mask);
}
331
/* stores val into an address pointed to by dst.
 * mask->exec_mask is used to figure out which bits of val
 * should be stored into the address
 * (0 means don't store this bit, 1 means do store).
 */
static void lp_exec_mask_store(struct lp_exec_mask *mask,
                               LLVMValueRef pred,
                               LLVMValueRef val,
                               LLVMValueRef dst)
{
   /* Mix the predicate and execution mask */
   if (mask->has_mask) {
      if (pred) {
         pred = LLVMBuildAnd(mask->bld->builder, pred, mask->exec_mask, "");
      } else {
         pred = mask->exec_mask;
      }
   }

   if (pred) {
      LLVMValueRef real_val, dst_val;

      /* read-modify-write: keep the old value where pred is 0 */
      dst_val = LLVMBuildLoad(mask->bld->builder, dst, "");
      real_val = lp_build_select(mask->bld,
                                 pred,
                                 val, dst_val);

      LLVMBuildStore(mask->bld->builder, real_val, dst);
   } else
      /* no mask in effect: plain unconditional store */
      LLVMBuildStore(mask->bld->builder, val, dst);
}
363
364 static void lp_exec_mask_call(struct lp_exec_mask *mask,
365 int func,
366 int *pc)
367 {
368 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
369 mask->call_stack[mask->call_stack_size].pc = *pc;
370 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
371 mask->call_stack_size++;
372 *pc = func;
373 }
374
/**
 * RET: when inside a subroutine, clear the currently-executing channels
 * from the return mask; when returning from main, stop interpretation
 * by setting *pc to -1.
 */
static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
{
   LLVMValueRef exec_mask;

   if (mask->call_stack_size == 0) {
      /* returning from main() */
      *pc = -1;
      return;
   }
   exec_mask = LLVMBuildNot(mask->bld->builder,
                            mask->exec_mask,
                            "ret");

   mask->ret_mask = LLVMBuildAnd(mask->bld->builder,
                                 mask->ret_mask,
                                 exec_mask, "ret_full");

   lp_exec_mask_update(mask);
}
394
/* BGNSUB: nothing to do here -- all call/return bookkeeping happens in
 * lp_exec_mask_call() and lp_exec_mask_ret().
 */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
398
399 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
400 {
401 assert(mask->call_stack_size);
402 mask->call_stack_size--;
403 *pc = mask->call_stack[mask->call_stack_size].pc;
404 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
405 lp_exec_mask_update(mask);
406 }
407
408
409 /**
410 * Return pointer to a temporary register channel (src or dest).
411 * Note that indirect addressing cannot be handled here.
412 * \param index which temporary register
413 * \param chan which channel of the temp register.
414 */
415 static LLVMValueRef
416 get_temp_ptr(struct lp_build_tgsi_soa_context *bld,
417 unsigned index,
418 unsigned chan)
419 {
420 assert(chan < 4);
421 if (bld->has_indirect_addressing) {
422 LLVMValueRef lindex = lp_build_const_int32(index * 4 + chan);
423 return LLVMBuildGEP(bld->base.builder, bld->temps_array, &lindex, 1, "");
424 }
425 else {
426 return bld->temps[index][chan];
427 }
428 }
429
430
/**
 * Gather vector.
 * Load one scalar per lane from base_ptr at the per-lane offsets given
 * in 'indexes' (an integer vector) and assemble them into one vector.
 * XXX the lp_build_gather() function should be capable of doing this
 * with a little work.
 */
static LLVMValueRef
build_gather(struct lp_build_tgsi_soa_context *bld,
             LLVMValueRef base_ptr,
             LLVMValueRef indexes)
{
   LLVMValueRef res = bld->base.undef;
   unsigned i;

   /*
    * Loop over elements of index_vec, load scalar value, insert it into 'res'.
    */
   for (i = 0; i < bld->base.type.length; i++) {
      LLVMValueRef ii = LLVMConstInt(LLVMInt32Type(), i, 0);
      LLVMValueRef index = LLVMBuildExtractElement(bld->base.builder,
                                                   indexes, ii, "");
      LLVMValueRef scalar_ptr = LLVMBuildGEP(bld->base.builder, base_ptr,
                                             &index, 1, "");
      LLVMValueRef scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");

      res = LLVMBuildInsertElement(bld->base.builder, res, scalar, ii, "");
   }

   return res;
}
460
461
462 /**
463 * Register fetch.
464 */
465 static LLVMValueRef
466 emit_fetch(
467 struct lp_build_tgsi_soa_context *bld,
468 const struct tgsi_full_instruction *inst,
469 unsigned src_op,
470 const unsigned chan_index )
471 {
472 const struct tgsi_full_src_register *reg = &inst->Src[src_op];
473 const unsigned swizzle =
474 tgsi_util_get_full_src_register_swizzle(reg, chan_index);
475 LLVMValueRef res;
476 LLVMValueRef addr_vec = NULL;
477
478 if (swizzle > 3) {
479 assert(0 && "invalid swizzle in emit_fetch()");
480 return bld->base.undef;
481 }
482
483 if (reg->Register.Indirect) {
484 /*
485 * Compute addr_vec: a vector of offsets into the register file
486 * from which we need to gather elements. Recall that the ADDR
487 * register's elements can all be different.
488 */
489
490 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->base.type);
491 unsigned swizzle = tgsi_util_get_src_register_swizzle( &reg->Indirect, chan_index );
492
493 LLVMValueRef vec4 = lp_build_const_int_vec(bld->int_bld.type, 4);
494
495 assert(bld->has_indirect_addressing);
496
497 addr_vec = LLVMBuildLoad(bld->base.builder,
498 bld->addr[reg->Indirect.Index][swizzle],
499 "load addr");
500
501 /* for indexing we want integers */
502 addr_vec = LLVMBuildFPToSI(bld->base.builder, addr_vec,
503 int_vec_type, "");
504
505 /* addr_vec = addr_vec * 4 */
506 addr_vec = lp_build_mul(&bld->base, addr_vec, vec4);
507 }
508
509 switch (reg->Register.File) {
510 case TGSI_FILE_CONSTANT:
511 if (reg->Register.Indirect) {
512 LLVMValueRef index_vec; /* index into the const buffer */
513
514 /* index_vec = broadcast(reg->Register.Index * 4 + swizzle) */
515 index_vec = lp_build_const_int_vec(bld->int_bld.type,
516 reg->Register.Index * 4 + swizzle);
517
518 /* index_vec = index_vec + addr_vec */
519 index_vec = lp_build_add(&bld->base, index_vec, addr_vec);
520
521 /* Gather values from the constant buffer */
522 res = build_gather(bld, bld->consts_ptr, index_vec);
523 }
524 else {
525 LLVMValueRef index; /* index into the const buffer */
526 LLVMValueRef scalar, scalar_ptr;
527
528 index = lp_build_const_int32(reg->Register.Index*4 + swizzle);
529
530 scalar_ptr = LLVMBuildGEP(bld->base.builder, bld->consts_ptr,
531 &index, 1, "");
532 scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");
533
534 res = lp_build_broadcast_scalar(&bld->base, scalar);
535 }
536 break;
537
538 case TGSI_FILE_IMMEDIATE:
539 res = bld->immediates[reg->Register.Index][swizzle];
540 assert(res);
541 break;
542
543 case TGSI_FILE_INPUT:
544 res = bld->inputs[reg->Register.Index][swizzle];
545 assert(res);
546 break;
547
548 case TGSI_FILE_TEMPORARY:
549 if (reg->Register.Indirect) {
550 LLVMValueRef vec_len =
551 lp_build_const_int_vec(bld->int_bld.type, bld->base.type.length);
552 LLVMValueRef index_vec; /* index into the const buffer */
553 LLVMValueRef temps_array;
554 LLVMTypeRef float4_ptr_type;
555
556 assert(bld->has_indirect_addressing);
557
558 /* index_vec = broadcast(reg->Register.Index * 4 + swizzle) */
559 index_vec = lp_build_const_int_vec(bld->int_bld.type,
560 reg->Register.Index * 4 + swizzle);
561
562 /* index_vec += addr_vec */
563 index_vec = lp_build_add(&bld->int_bld, index_vec, addr_vec);
564
565 /* index_vec *= vector_length */
566 index_vec = lp_build_mul(&bld->int_bld, index_vec, vec_len);
567
568 /* cast temps_array pointer to float* */
569 float4_ptr_type = LLVMPointerType(LLVMFloatType(), 0);
570 temps_array = LLVMBuildBitCast(bld->int_bld.builder, bld->temps_array,
571 float4_ptr_type, "");
572
573 /* Gather values from the temporary register array */
574 res = build_gather(bld, temps_array, index_vec);
575 }
576 else {
577 LLVMValueRef temp_ptr;
578 temp_ptr = get_temp_ptr(bld, reg->Register.Index, swizzle);
579 res = LLVMBuildLoad(bld->base.builder, temp_ptr, "");
580 if (!res)
581 return bld->base.undef;
582 }
583 break;
584
585 default:
586 assert(0 && "invalid src register in emit_fetch()");
587 return bld->base.undef;
588 }
589
590 switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
591 case TGSI_UTIL_SIGN_CLEAR:
592 res = lp_build_abs( &bld->base, res );
593 break;
594
595 case TGSI_UTIL_SIGN_SET:
596 /* TODO: Use bitwese OR for floating point */
597 res = lp_build_abs( &bld->base, res );
598 res = LLVMBuildNeg( bld->base.builder, res, "" );
599 break;
600
601 case TGSI_UTIL_SIGN_TOGGLE:
602 res = LLVMBuildNeg( bld->base.builder, res, "" );
603 break;
604
605 case TGSI_UTIL_SIGN_KEEP:
606 break;
607 }
608
609 return res;
610 }
611
612
613 /**
614 * Register fetch with derivatives.
615 */
616 static void
617 emit_fetch_deriv(
618 struct lp_build_tgsi_soa_context *bld,
619 const struct tgsi_full_instruction *inst,
620 unsigned index,
621 const unsigned chan_index,
622 LLVMValueRef *res,
623 LLVMValueRef *ddx,
624 LLVMValueRef *ddy)
625 {
626 LLVMValueRef src;
627
628 src = emit_fetch(bld, inst, index, chan_index);
629
630 if(res)
631 *res = src;
632
633 /* TODO: use interpolation coeffs for inputs */
634
635 if(ddx)
636 *ddx = lp_build_ddx(&bld->base, src);
637
638 if(ddy)
639 *ddy = lp_build_ddy(&bld->base, src);
640 }
641
642
/**
 * Fetch the per-channel predicate masks for a predicated instruction.
 * Sets pred[chan] to an integer mask vector, or to NULL for every
 * channel when the instruction is not predicated.
 */
static void
emit_fetch_predicate(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   LLVMValueRef *pred)
{
   unsigned index;
   unsigned char swizzles[4];
   LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
   LLVMValueRef value;
   unsigned chan;

   if (!inst->Instruction.Predicate) {
      /* not predicated: no mask for any channel */
      FOR_EACH_CHANNEL( chan ) {
         pred[chan] = NULL;
      }
      return;
   }

   swizzles[0] = inst->Predicate.SwizzleX;
   swizzles[1] = inst->Predicate.SwizzleY;
   swizzles[2] = inst->Predicate.SwizzleZ;
   swizzles[3] = inst->Predicate.SwizzleW;

   index = inst->Predicate.Index;
   assert(index < LP_MAX_TGSI_PREDS);

   FOR_EACH_CHANNEL( chan ) {
      unsigned swizzle = swizzles[chan];

      /*
       * Only fetch the predicate register channels that are actually listed
       * in the swizzles
       */
      if (!unswizzled[swizzle]) {
         value = LLVMBuildLoad(bld->base.builder,
                               bld->preds[index][swizzle], "");

         /*
          * Convert the value to an integer mask.
          *
          * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
          * is needlessly causing two comparisons due to storing the intermediate
          * result as float vector instead of an integer mask vector.
          */
         value = lp_build_compare(bld->base.builder,
                                  bld->base.type,
                                  PIPE_FUNC_NOTEQUAL,
                                  value,
                                  bld->base.zero);
         if (inst->Predicate.Negate) {
            value = LLVMBuildNot(bld->base.builder, value, "");
         }

         unswizzled[swizzle] = value;
      } else {
         value = unswizzled[swizzle];
      }

      pred[chan] = value;
   }
}
708
709
710 /**
711 * Register store.
712 */
713 static void
714 emit_store(
715 struct lp_build_tgsi_soa_context *bld,
716 const struct tgsi_full_instruction *inst,
717 unsigned index,
718 unsigned chan_index,
719 LLVMValueRef pred,
720 LLVMValueRef value)
721 {
722 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
723 LLVMValueRef addr = NULL;
724
725 switch( inst->Instruction.Saturate ) {
726 case TGSI_SAT_NONE:
727 break;
728
729 case TGSI_SAT_ZERO_ONE:
730 value = lp_build_max(&bld->base, value, bld->base.zero);
731 value = lp_build_min(&bld->base, value, bld->base.one);
732 break;
733
734 case TGSI_SAT_MINUS_PLUS_ONE:
735 value = lp_build_max(&bld->base, value, lp_build_const_vec(bld->base.type, -1.0));
736 value = lp_build_min(&bld->base, value, bld->base.one);
737 break;
738
739 default:
740 assert(0);
741 }
742
743 if (reg->Register.Indirect) {
744 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->base.type);
745 unsigned swizzle = tgsi_util_get_src_register_swizzle( &reg->Indirect, chan_index );
746 addr = LLVMBuildLoad(bld->base.builder,
747 bld->addr[reg->Indirect.Index][swizzle],
748 "");
749 /* for indexing we want integers */
750 addr = LLVMBuildFPToSI(bld->base.builder, addr,
751 int_vec_type, "");
752 addr = LLVMBuildExtractElement(bld->base.builder,
753 addr, LLVMConstInt(LLVMInt32Type(), 0, 0),
754 "");
755 addr = lp_build_mul(&bld->base, addr, LLVMConstInt(LLVMInt32Type(), 4, 0));
756 }
757
758 switch( reg->Register.File ) {
759 case TGSI_FILE_OUTPUT:
760 lp_exec_mask_store(&bld->exec_mask, pred, value,
761 bld->outputs[reg->Register.Index][chan_index]);
762 break;
763
764 case TGSI_FILE_TEMPORARY:
765 if (reg->Register.Indirect) {
766 /* XXX not done yet */
767 debug_printf("WARNING: LLVM scatter store of temp regs"
768 " not implemented\n");
769 }
770 else {
771 LLVMValueRef temp_ptr = get_temp_ptr(bld, reg->Register.Index,
772 chan_index);
773 lp_exec_mask_store(&bld->exec_mask, pred, value, temp_ptr);
774 }
775 break;
776
777 case TGSI_FILE_ADDRESS:
778 lp_exec_mask_store(&bld->exec_mask, pred, value,
779 bld->addr[reg->Indirect.Index][chan_index]);
780 break;
781
782 case TGSI_FILE_PREDICATE:
783 lp_exec_mask_store(&bld->exec_mask, pred, value,
784 bld->preds[index][chan_index]);
785 break;
786
787 default:
788 assert( 0 );
789 }
790 }
791
792
/**
 * High-level instruction translators.
 */

/* How a texture-sampling instruction modifies the basic TEX operation
 * (see emit_tex() for how each variant sources its extra operands).
 */
enum tex_modifier {
   TEX_MODIFIER_NONE = 0,
   TEX_MODIFIER_PROJECTED,      /* divide coords by src0.w */
   TEX_MODIFIER_LOD_BIAS,       /* lod bias in src0.w */
   TEX_MODIFIER_EXPLICIT_LOD,   /* explicit lod in src0.w */
   TEX_MODIFIER_EXPLICIT_DERIV  /* explicit derivatives in src1/src2 */
};
804
/**
 * Emit a texture-sampling operation: gather coords, lod/bias and
 * derivatives as dictated by \p modifier and hand them to the sampler
 * generator, which writes the four result channels into \p texel.
 */
static void
emit_tex( struct lp_build_tgsi_soa_context *bld,
          const struct tgsi_full_instruction *inst,
          enum tex_modifier modifier,
          LLVMValueRef *texel)
{
   unsigned unit;
   LLVMValueRef lod_bias, explicit_lod;
   LLVMValueRef oow = NULL;
   LLVMValueRef coords[3];
   LLVMValueRef ddx[3];
   LLVMValueRef ddy[3];
   unsigned num_coords;
   unsigned i;

   if (!bld->sampler) {
      /* no sampler generator supplied: return undef texels */
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = bld->base.undef;
      }
      return;
   }

   /* number of texcoord components required by the texture target */
   switch (inst->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      num_coords = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      num_coords = 2;
      break;
   case TGSI_TEXTURE_SHADOW1D:
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      num_coords = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* lod bias and explicit lod both come from src0.w */
   if (modifier == TEX_MODIFIER_LOD_BIAS) {
      lod_bias = emit_fetch( bld, inst, 0, 3 );
      explicit_lod = NULL;
   }
   else if (modifier == TEX_MODIFIER_EXPLICIT_LOD) {
      lod_bias = NULL;
      explicit_lod = emit_fetch( bld, inst, 0, 3 );
   }
   else {
      lod_bias = NULL;
      explicit_lod = NULL;
   }

   /* projective texturing: multiply coords by 1/src0.w */
   if (modifier == TEX_MODIFIER_PROJECTED) {
      oow = emit_fetch( bld, inst, 0, 3 );
      oow = lp_build_rcp(&bld->base, oow);
   }

   for (i = 0; i < num_coords; i++) {
      coords[i] = emit_fetch( bld, inst, 0, i );
      if (modifier == TEX_MODIFIER_PROJECTED)
         coords[i] = lp_build_mul(&bld->base, coords[i], oow);
   }
   for (i = num_coords; i < 3; i++) {
      coords[i] = bld->base.undef;
   }

   /* derivatives: explicit from src1/src2, otherwise computed from the
    * quad layout; the sampler unit index moves accordingly */
   if (modifier == TEX_MODIFIER_EXPLICIT_DERIV) {
      for (i = 0; i < num_coords; i++) {
         ddx[i] = emit_fetch( bld, inst, 1, i );
         ddy[i] = emit_fetch( bld, inst, 2, i );
      }
      unit = inst->Src[3].Register.Index;
   } else {
      for (i = 0; i < num_coords; i++) {
         ddx[i] = lp_build_ddx( &bld->base, coords[i] );
         ddy[i] = lp_build_ddy( &bld->base, coords[i] );
      }
      unit = inst->Src[1].Register.Index;
   }
   for (i = num_coords; i < 3; i++) {
      ddx[i] = bld->base.undef;
      ddy[i] = bld->base.undef;
   }

   bld->sampler->emit_fetch_texel(bld->sampler,
                                  bld->base.builder,
                                  bld->base.type,
                                  unit, num_coords, coords,
                                  ddx, ddy,
                                  lod_bias, explicit_lod,
                                  texel);
}
901
902
/**
 * Kill fragment if any of the src register values are negative.
 */
static void
emit_kil(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst )
{
   const struct tgsi_full_src_register *reg = &inst->Src[0];
   LLVMValueRef terms[NUM_CHANNELS];
   LLVMValueRef mask;
   unsigned chan_index;

   memset(&terms, 0, sizeof terms);

   FOR_EACH_CHANNEL( chan_index ) {
      unsigned swizzle;

      /* Unswizzle channel */
      swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );

      /* Check if the component has not been already tested. */
      assert(swizzle < NUM_CHANNELS);
      if( !terms[swizzle] )
         /* TODO: change the comparison operator instead of setting the sign */
         terms[swizzle] = emit_fetch(bld, inst, 0, chan_index );
   }

   /* AND together the per-channel survival masks */
   mask = NULL;
   FOR_EACH_CHANNEL( chan_index ) {
      if(terms[chan_index]) {
         LLVMValueRef chan_mask;

         /*
          * If term < 0 then mask = 0 else mask = ~0.
          */
         chan_mask = lp_build_cmp(&bld->base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->base.zero);

         if(mask)
            mask = LLVMBuildAnd(bld->base.builder, mask, chan_mask, "");
         else
            mask = chan_mask;
      }
   }

   /* fold the result into the fragment live mask */
   if(mask)
      lp_build_mask_update(bld->mask, mask);
}
951
952
/**
 * Predicated fragment kill.
 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
 * The only predication is the execution mask which will apply if
 * we're inside a loop or conditional.
 */
static void
emit_kilp(struct lp_build_tgsi_soa_context *bld,
          const struct tgsi_full_instruction *inst)
{
   LLVMValueRef mask;

   /* For those channels which are "alive", disable fragment shader
    * execution.
    */
   if (bld->exec_mask.has_mask) {
      /* kill only the channels currently executing */
      mask = LLVMBuildNot(bld->base.builder, bld->exec_mask.exec_mask, "kilp");
   }
   else {
      /* no control flow active: kill all channels */
      mask = bld->base.zero;
   }

   lp_build_mask_update(bld->mask, mask);
}
977
/**
 * Allocate storage (allocas) for the registers named by one TGSI
 * declaration.  Temporaries become per-channel allocas, or -- when
 * indirect addressing is in use -- one flat array covering all temps.
 */
static void
emit_declaration(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_declaration *decl)
{
   LLVMTypeRef vec_type = lp_build_vec_type(bld->base.type);

   unsigned first = decl->Range.First;
   unsigned last = decl->Range.Last;
   unsigned idx, i;

   for (idx = first; idx <= last; ++idx) {
      switch (decl->Declaration.File) {
      case TGSI_FILE_TEMPORARY:
         assert(idx < LP_MAX_TGSI_TEMPS);
         if (bld->has_indirect_addressing) {
            /* one array of last*4+4 vectors holds every temp channel
             * (note: this identical array is re-allocated once per idx
             * in the declared range) */
            LLVMValueRef array_size = LLVMConstInt(LLVMInt32Type(),
                                                   last*4 + 4, 0);
            bld->temps_array = lp_build_array_alloca(bld->base.builder,
                                                     vec_type, array_size, "");
         } else {
            for (i = 0; i < NUM_CHANNELS; i++)
               bld->temps[idx][i] = lp_build_alloca(bld->base.builder,
                                                    vec_type, "");
         }
         break;

      case TGSI_FILE_OUTPUT:
         for (i = 0; i < NUM_CHANNELS; i++)
            bld->outputs[idx][i] = lp_build_alloca(bld->base.builder,
                                                   vec_type, "");
         break;

      case TGSI_FILE_ADDRESS:
         assert(idx < LP_MAX_TGSI_ADDRS);
         for (i = 0; i < NUM_CHANNELS; i++)
            bld->addr[idx][i] = lp_build_alloca(bld->base.builder,
                                                vec_type, "");
         break;

      case TGSI_FILE_PREDICATE:
         assert(idx < LP_MAX_TGSI_PREDS);
         for (i = 0; i < NUM_CHANNELS; i++)
            bld->preds[idx][i] = lp_build_alloca(bld->base.builder,
                                                 vec_type, "");
         break;

      default:
         /* don't need to declare other vars */
         break;
      }
   }
}
1031
1032
1033 /**
1034 * Emit LLVM for one TGSI instruction.
1035 * \param return TRUE for success, FALSE otherwise
1036 */
1037 static boolean
1038 emit_instruction(
1039 struct lp_build_tgsi_soa_context *bld,
1040 const struct tgsi_full_instruction *inst,
1041 const struct tgsi_opcode_info *info,
1042 int *pc)
1043 {
1044 unsigned chan_index;
1045 LLVMValueRef src0, src1, src2;
1046 LLVMValueRef tmp0, tmp1, tmp2;
1047 LLVMValueRef tmp3 = NULL;
1048 LLVMValueRef tmp4 = NULL;
1049 LLVMValueRef tmp5 = NULL;
1050 LLVMValueRef tmp6 = NULL;
1051 LLVMValueRef tmp7 = NULL;
1052 LLVMValueRef res;
1053 LLVMValueRef dst0[NUM_CHANNELS];
1054
1055 /*
1056 * Stores and write masks are handled in a general fashion after the long
1057 * instruction opcode switch statement.
1058 *
1059 * Although not stricitly necessary, we avoid generating instructions for
1060 * channels which won't be stored, in cases where's that easy. For some
1061 * complex instructions, like texture sampling, it is more convenient to
1062 * assume a full writemask and then let LLVM optimization passes eliminate
1063 * redundant code.
1064 */
1065
1066 (*pc)++;
1067
1068 assert(info->num_dst <= 1);
1069 if (info->num_dst) {
1070 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1071 dst0[chan_index] = bld->base.undef;
1072 }
1073 }
1074
1075 switch (inst->Instruction.Opcode) {
1076 case TGSI_OPCODE_ARL:
1077 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1078 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1079 tmp0 = lp_build_floor(&bld->base, tmp0);
1080 dst0[chan_index] = tmp0;
1081 }
1082 break;
1083
1084 case TGSI_OPCODE_MOV:
1085 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1086 dst0[chan_index] = emit_fetch( bld, inst, 0, chan_index );
1087 }
1088 break;
1089
1090 case TGSI_OPCODE_LIT:
1091 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ) {
1092 dst0[CHAN_X] = bld->base.one;
1093 }
1094 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1095 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1096 dst0[CHAN_Y] = lp_build_max( &bld->base, src0, bld->base.zero);
1097 }
1098 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1099 /* XMM[1] = SrcReg[0].yyyy */
1100 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1101 /* XMM[1] = max(XMM[1], 0) */
1102 tmp1 = lp_build_max( &bld->base, tmp1, bld->base.zero);
1103 /* XMM[2] = SrcReg[0].wwww */
1104 tmp2 = emit_fetch( bld, inst, 0, CHAN_W );
1105 tmp1 = lp_build_pow( &bld->base, tmp1, tmp2);
1106 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1107 tmp2 = lp_build_cmp(&bld->base, PIPE_FUNC_GREATER, tmp0, bld->base.zero);
1108 dst0[CHAN_Z] = lp_build_select(&bld->base, tmp2, tmp1, bld->base.zero);
1109 }
1110 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) ) {
1111 dst0[CHAN_W] = bld->base.one;
1112 }
1113 break;
1114
1115 case TGSI_OPCODE_RCP:
1116 /* TGSI_OPCODE_RECIP */
1117 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1118 res = lp_build_rcp(&bld->base, src0);
1119 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1120 dst0[chan_index] = res;
1121 }
1122 break;
1123
1124 case TGSI_OPCODE_RSQ:
1125 /* TGSI_OPCODE_RECIPSQRT */
1126 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1127 src0 = lp_build_abs(&bld->base, src0);
1128 res = lp_build_rsqrt(&bld->base, src0);
1129 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1130 dst0[chan_index] = res;
1131 }
1132 break;
1133
1134 case TGSI_OPCODE_EXP:
1135 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1136 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1137 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1138 LLVMValueRef *p_exp2_int_part = NULL;
1139 LLVMValueRef *p_frac_part = NULL;
1140 LLVMValueRef *p_exp2 = NULL;
1141
1142 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1143
1144 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1145 p_exp2_int_part = &tmp0;
1146 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1147 p_frac_part = &tmp1;
1148 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1149 p_exp2 = &tmp2;
1150
1151 lp_build_exp2_approx(&bld->base, src0, p_exp2_int_part, p_frac_part, p_exp2);
1152
1153 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1154 dst0[CHAN_X] = tmp0;
1155 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1156 dst0[CHAN_Y] = tmp1;
1157 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1158 dst0[CHAN_Z] = tmp2;
1159 }
1160 /* dst.w = 1.0 */
1161 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1162 dst0[CHAN_W] = bld->base.one;
1163 }
1164 break;
1165
1166 case TGSI_OPCODE_LOG:
1167 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1168 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1169 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1170 LLVMValueRef *p_floor_log2 = NULL;
1171 LLVMValueRef *p_exp = NULL;
1172 LLVMValueRef *p_log2 = NULL;
1173
1174 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1175 src0 = lp_build_abs( &bld->base, src0 );
1176
1177 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1178 p_floor_log2 = &tmp0;
1179 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1180 p_exp = &tmp1;
1181 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1182 p_log2 = &tmp2;
1183
1184 lp_build_log2_approx(&bld->base, src0, p_exp, p_floor_log2, p_log2);
1185
1186 /* dst.x = floor(lg2(abs(src.x))) */
1187 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1188 dst0[CHAN_X] = tmp0;
1189 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1190 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y )) {
1191 dst0[CHAN_Y] = lp_build_div( &bld->base, src0, tmp1);
1192 }
1193 /* dst.z = lg2(abs(src.x)) */
1194 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1195 dst0[CHAN_Z] = tmp2;
1196 }
1197 /* dst.w = 1.0 */
1198 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1199 dst0[CHAN_W] = bld->base.one;
1200 }
1201 break;
1202
1203 case TGSI_OPCODE_MUL:
1204 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1205 src0 = emit_fetch( bld, inst, 0, chan_index );
1206 src1 = emit_fetch( bld, inst, 1, chan_index );
1207 dst0[chan_index] = lp_build_mul(&bld->base, src0, src1);
1208 }
1209 break;
1210
1211 case TGSI_OPCODE_ADD:
1212 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1213 src0 = emit_fetch( bld, inst, 0, chan_index );
1214 src1 = emit_fetch( bld, inst, 1, chan_index );
1215 dst0[chan_index] = lp_build_add(&bld->base, src0, src1);
1216 }
1217 break;
1218
1219 case TGSI_OPCODE_DP3:
1220 /* TGSI_OPCODE_DOT3 */
1221 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1222 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1223 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1224 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1225 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1226 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1227 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1228 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1229 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1230 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1231 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1232 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1233 dst0[chan_index] = tmp0;
1234 }
1235 break;
1236
1237 case TGSI_OPCODE_DP4:
1238 /* TGSI_OPCODE_DOT4 */
1239 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1240 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1241 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1242 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1243 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1244 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1245 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1246 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1247 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1248 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1249 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1250 tmp1 = emit_fetch( bld, inst, 0, CHAN_W );
1251 tmp2 = emit_fetch( bld, inst, 1, CHAN_W );
1252 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1253 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1254 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1255 dst0[chan_index] = tmp0;
1256 }
1257 break;
1258
1259 case TGSI_OPCODE_DST:
1260 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1261 dst0[CHAN_X] = bld->base.one;
1262 }
1263 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1264 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1265 tmp1 = emit_fetch( bld, inst, 1, CHAN_Y );
1266 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp0, tmp1);
1267 }
1268 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1269 dst0[CHAN_Z] = emit_fetch( bld, inst, 0, CHAN_Z );
1270 }
1271 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1272 dst0[CHAN_W] = emit_fetch( bld, inst, 1, CHAN_W );
1273 }
1274 break;
1275
1276 case TGSI_OPCODE_MIN:
1277 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1278 src0 = emit_fetch( bld, inst, 0, chan_index );
1279 src1 = emit_fetch( bld, inst, 1, chan_index );
1280 dst0[chan_index] = lp_build_min( &bld->base, src0, src1 );
1281 }
1282 break;
1283
1284 case TGSI_OPCODE_MAX:
1285 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1286 src0 = emit_fetch( bld, inst, 0, chan_index );
1287 src1 = emit_fetch( bld, inst, 1, chan_index );
1288 dst0[chan_index] = lp_build_max( &bld->base, src0, src1 );
1289 }
1290 break;
1291
1292 case TGSI_OPCODE_SLT:
1293 /* TGSI_OPCODE_SETLT */
1294 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1295 src0 = emit_fetch( bld, inst, 0, chan_index );
1296 src1 = emit_fetch( bld, inst, 1, chan_index );
1297 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, src1 );
1298 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1299 }
1300 break;
1301
1302 case TGSI_OPCODE_SGE:
1303 /* TGSI_OPCODE_SETGE */
1304 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1305 src0 = emit_fetch( bld, inst, 0, chan_index );
1306 src1 = emit_fetch( bld, inst, 1, chan_index );
1307 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GEQUAL, src0, src1 );
1308 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1309 }
1310 break;
1311
1312 case TGSI_OPCODE_MAD:
1313 /* TGSI_OPCODE_MADD */
1314 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1315 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1316 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1317 tmp2 = emit_fetch( bld, inst, 2, chan_index );
1318 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1319 tmp0 = lp_build_add( &bld->base, tmp0, tmp2);
1320 dst0[chan_index] = tmp0;
1321 }
1322 break;
1323
1324 case TGSI_OPCODE_SUB:
1325 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1326 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1327 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1328 dst0[chan_index] = lp_build_sub( &bld->base, tmp0, tmp1);
1329 }
1330 break;
1331
1332 case TGSI_OPCODE_LRP:
1333 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1334 src0 = emit_fetch( bld, inst, 0, chan_index );
1335 src1 = emit_fetch( bld, inst, 1, chan_index );
1336 src2 = emit_fetch( bld, inst, 2, chan_index );
1337 tmp0 = lp_build_sub( &bld->base, src1, src2 );
1338 tmp0 = lp_build_mul( &bld->base, src0, tmp0 );
1339 dst0[chan_index] = lp_build_add( &bld->base, tmp0, src2 );
1340 }
1341 break;
1342
1343 case TGSI_OPCODE_CND:
1344 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1345 src0 = emit_fetch( bld, inst, 0, chan_index );
1346 src1 = emit_fetch( bld, inst, 1, chan_index );
1347 src2 = emit_fetch( bld, inst, 2, chan_index );
1348 tmp1 = lp_build_const_vec(bld->base.type, 0.5);
1349 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src2, tmp1);
1350 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src0, src1 );
1351 }
1352 break;
1353
1354 case TGSI_OPCODE_DP2A:
1355 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1356 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1357 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1358 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1359 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1360 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1361 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1362 tmp1 = emit_fetch( bld, inst, 2, CHAN_X ); /* xmm1 = src[2].x */
1363 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1364 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1365 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1366 }
1367 break;
1368
1369 case TGSI_OPCODE_FRC:
1370 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1371 src0 = emit_fetch( bld, inst, 0, chan_index );
1372 tmp0 = lp_build_floor(&bld->base, src0);
1373 tmp0 = lp_build_sub(&bld->base, src0, tmp0);
1374 dst0[chan_index] = tmp0;
1375 }
1376 break;
1377
1378 case TGSI_OPCODE_CLAMP:
1379 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1380 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1381 src1 = emit_fetch( bld, inst, 1, chan_index );
1382 src2 = emit_fetch( bld, inst, 2, chan_index );
1383 tmp0 = lp_build_max(&bld->base, tmp0, src1);
1384 tmp0 = lp_build_min(&bld->base, tmp0, src2);
1385 dst0[chan_index] = tmp0;
1386 }
1387 break;
1388
1389 case TGSI_OPCODE_FLR:
1390 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1391 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1392 dst0[chan_index] = lp_build_floor(&bld->base, tmp0);
1393 }
1394 break;
1395
1396 case TGSI_OPCODE_ROUND:
1397 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1398 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1399 dst0[chan_index] = lp_build_round(&bld->base, tmp0);
1400 }
1401 break;
1402
1403 case TGSI_OPCODE_EX2: {
1404 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1405 tmp0 = lp_build_exp2( &bld->base, tmp0);
1406 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1407 dst0[chan_index] = tmp0;
1408 }
1409 break;
1410 }
1411
1412 case TGSI_OPCODE_LG2:
1413 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1414 tmp0 = lp_build_log2( &bld->base, tmp0);
1415 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1416 dst0[chan_index] = tmp0;
1417 }
1418 break;
1419
1420 case TGSI_OPCODE_POW:
1421 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1422 src1 = emit_fetch( bld, inst, 1, CHAN_X );
1423 res = lp_build_pow( &bld->base, src0, src1 );
1424 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1425 dst0[chan_index] = res;
1426 }
1427 break;
1428
1429 case TGSI_OPCODE_XPD:
1430 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1431 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1432 tmp1 = emit_fetch( bld, inst, 1, CHAN_Z );
1433 tmp3 = emit_fetch( bld, inst, 0, CHAN_Z );
1434 }
1435 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1436 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1437 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1438 tmp4 = emit_fetch( bld, inst, 1, CHAN_Y );
1439 }
1440 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1441 tmp2 = tmp0;
1442 tmp2 = lp_build_mul( &bld->base, tmp2, tmp1);
1443 tmp5 = tmp3;
1444 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1445 tmp2 = lp_build_sub( &bld->base, tmp2, tmp5);
1446 dst0[CHAN_X] = tmp2;
1447 }
1448 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1449 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1450 tmp2 = emit_fetch( bld, inst, 1, CHAN_X );
1451 tmp5 = emit_fetch( bld, inst, 0, CHAN_X );
1452 }
1453 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1454 tmp3 = lp_build_mul( &bld->base, tmp3, tmp2);
1455 tmp1 = lp_build_mul( &bld->base, tmp1, tmp5);
1456 tmp3 = lp_build_sub( &bld->base, tmp3, tmp1);
1457 dst0[CHAN_Y] = tmp3;
1458 }
1459 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1460 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1461 tmp0 = lp_build_mul( &bld->base, tmp0, tmp2);
1462 tmp5 = lp_build_sub( &bld->base, tmp5, tmp0);
1463 dst0[CHAN_Z] = tmp5;
1464 }
1465 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1466 dst0[CHAN_W] = bld->base.one;
1467 }
1468 break;
1469
1470 case TGSI_OPCODE_ABS:
1471 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1472 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1473 dst0[chan_index] = lp_build_abs( &bld->base, tmp0 );
1474 }
1475 break;
1476
1477 case TGSI_OPCODE_RCC:
1478 /* deprecated? */
1479 assert(0);
1480 return FALSE;
1481
1482 case TGSI_OPCODE_DPH:
1483 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1484 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1485 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1486 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1487 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1488 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1489 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1490 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1491 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1492 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1493 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1494 tmp1 = emit_fetch( bld, inst, 1, CHAN_W );
1495 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1496 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1497 dst0[chan_index] = tmp0;
1498 }
1499 break;
1500
1501 case TGSI_OPCODE_COS:
1502 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1503 tmp0 = lp_build_cos( &bld->base, tmp0 );
1504 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1505 dst0[chan_index] = tmp0;
1506 }
1507 break;
1508
1509 case TGSI_OPCODE_DDX:
1510 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1511 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, &dst0[chan_index], NULL);
1512 }
1513 break;
1514
1515 case TGSI_OPCODE_DDY:
1516 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1517 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, NULL, &dst0[chan_index]);
1518 }
1519 break;
1520
1521 case TGSI_OPCODE_KILP:
1522 /* predicated kill */
1523 emit_kilp( bld, inst );
1524 break;
1525
1526 case TGSI_OPCODE_KIL:
1527 /* conditional kill */
1528 emit_kil( bld, inst );
1529 break;
1530
1531 case TGSI_OPCODE_PK2H:
1532 return FALSE;
1533 break;
1534
1535 case TGSI_OPCODE_PK2US:
1536 return FALSE;
1537 break;
1538
1539 case TGSI_OPCODE_PK4B:
1540 return FALSE;
1541 break;
1542
1543 case TGSI_OPCODE_PK4UB:
1544 return FALSE;
1545 break;
1546
1547 case TGSI_OPCODE_RFL:
1548 return FALSE;
1549 break;
1550
1551 case TGSI_OPCODE_SEQ:
1552 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1553 src0 = emit_fetch( bld, inst, 0, chan_index );
1554 src1 = emit_fetch( bld, inst, 1, chan_index );
1555 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_EQUAL, src0, src1 );
1556 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1557 }
1558 break;
1559
1560 case TGSI_OPCODE_SFL:
1561 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1562 dst0[chan_index] = bld->base.zero;
1563 }
1564 break;
1565
1566 case TGSI_OPCODE_SGT:
1567 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1568 src0 = emit_fetch( bld, inst, 0, chan_index );
1569 src1 = emit_fetch( bld, inst, 1, chan_index );
1570 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src0, src1 );
1571 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1572 }
1573 break;
1574
1575 case TGSI_OPCODE_SIN:
1576 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1577 tmp0 = lp_build_sin( &bld->base, tmp0 );
1578 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1579 dst0[chan_index] = tmp0;
1580 }
1581 break;
1582
1583 case TGSI_OPCODE_SLE:
1584 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1585 src0 = emit_fetch( bld, inst, 0, chan_index );
1586 src1 = emit_fetch( bld, inst, 1, chan_index );
1587 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LEQUAL, src0, src1 );
1588 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1589 }
1590 break;
1591
1592 case TGSI_OPCODE_SNE:
1593 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1594 src0 = emit_fetch( bld, inst, 0, chan_index );
1595 src1 = emit_fetch( bld, inst, 1, chan_index );
1596 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_NOTEQUAL, src0, src1 );
1597 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1598 }
1599 break;
1600
1601 case TGSI_OPCODE_STR:
1602 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1603 dst0[chan_index] = bld->base.one;
1604 }
1605 break;
1606
1607 case TGSI_OPCODE_TEX:
1608 emit_tex( bld, inst, TEX_MODIFIER_NONE, dst0 );
1609 break;
1610
1611 case TGSI_OPCODE_TXD:
1612 emit_tex( bld, inst, TEX_MODIFIER_EXPLICIT_DERIV, dst0 );
1613 break;
1614
1615 case TGSI_OPCODE_UP2H:
1616 /* deprecated */
1617 assert (0);
1618 return FALSE;
1619 break;
1620
1621 case TGSI_OPCODE_UP2US:
1622 /* deprecated */
1623 assert(0);
1624 return FALSE;
1625 break;
1626
1627 case TGSI_OPCODE_UP4B:
1628 /* deprecated */
1629 assert(0);
1630 return FALSE;
1631 break;
1632
1633 case TGSI_OPCODE_UP4UB:
1634 /* deprecated */
1635 assert(0);
1636 return FALSE;
1637 break;
1638
1639 case TGSI_OPCODE_X2D:
1640 /* deprecated? */
1641 assert(0);
1642 return FALSE;
1643 break;
1644
1645 case TGSI_OPCODE_ARA:
1646 /* deprecated */
1647 assert(0);
1648 return FALSE;
1649 break;
1650
1651 case TGSI_OPCODE_ARR:
1652 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1653 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1654 tmp0 = lp_build_round(&bld->base, tmp0);
1655 dst0[chan_index] = tmp0;
1656 }
1657 break;
1658
1659 case TGSI_OPCODE_BRA:
1660 /* deprecated */
1661 assert(0);
1662 return FALSE;
1663 break;
1664
1665 case TGSI_OPCODE_CAL:
1666 lp_exec_mask_call(&bld->exec_mask,
1667 inst->Label.Label,
1668 pc);
1669
1670 break;
1671
1672 case TGSI_OPCODE_RET:
1673 lp_exec_mask_ret(&bld->exec_mask, pc);
1674 break;
1675
1676 case TGSI_OPCODE_END:
1677 *pc = -1;
1678 break;
1679
1680 case TGSI_OPCODE_SSG:
1681 /* TGSI_OPCODE_SGN */
1682 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1683 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1684 dst0[chan_index] = lp_build_sgn( &bld->base, tmp0 );
1685 }
1686 break;
1687
1688 case TGSI_OPCODE_CMP:
1689 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1690 src0 = emit_fetch( bld, inst, 0, chan_index );
1691 src1 = emit_fetch( bld, inst, 1, chan_index );
1692 src2 = emit_fetch( bld, inst, 2, chan_index );
1693 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, bld->base.zero );
1694 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src1, src2);
1695 }
1696 break;
1697
1698 case TGSI_OPCODE_SCS:
1699 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1700 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1701 dst0[CHAN_X] = lp_build_cos( &bld->base, tmp0 );
1702 }
1703 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1704 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1705 dst0[CHAN_Y] = lp_build_sin( &bld->base, tmp0 );
1706 }
1707 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1708 dst0[CHAN_Z] = bld->base.zero;
1709 }
1710 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1711 dst0[CHAN_W] = bld->base.one;
1712 }
1713 break;
1714
1715 case TGSI_OPCODE_TXB:
1716 emit_tex( bld, inst, TEX_MODIFIER_LOD_BIAS, dst0 );
1717 break;
1718
1719 case TGSI_OPCODE_NRM:
1720 /* fall-through */
1721 case TGSI_OPCODE_NRM4:
1722 /* 3 or 4-component normalization */
1723 {
1724 uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
1725
1726 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) ||
1727 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y) ||
1728 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z) ||
1729 (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4)) {
1730
1731 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1732
1733 /* xmm4 = src.x */
1734 /* xmm0 = src.x * src.x */
1735 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1736 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1737 tmp4 = tmp0;
1738 }
1739 tmp0 = lp_build_mul( &bld->base, tmp0, tmp0);
1740
1741 /* xmm5 = src.y */
1742 /* xmm0 = xmm0 + src.y * src.y */
1743 tmp1 = emit_fetch(bld, inst, 0, CHAN_Y);
1744 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1745 tmp5 = tmp1;
1746 }
1747 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1748 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1749
1750 /* xmm6 = src.z */
1751 /* xmm0 = xmm0 + src.z * src.z */
1752 tmp1 = emit_fetch(bld, inst, 0, CHAN_Z);
1753 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1754 tmp6 = tmp1;
1755 }
1756 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1757 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1758
1759 if (dims == 4) {
1760 /* xmm7 = src.w */
1761 /* xmm0 = xmm0 + src.w * src.w */
1762 tmp1 = emit_fetch(bld, inst, 0, CHAN_W);
1763 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W)) {
1764 tmp7 = tmp1;
1765 }
1766 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1767 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1768 }
1769
1770 /* xmm1 = 1 / sqrt(xmm0) */
1771 tmp1 = lp_build_rsqrt( &bld->base, tmp0);
1772
1773 /* dst.x = xmm1 * src.x */
1774 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1775 dst0[CHAN_X] = lp_build_mul( &bld->base, tmp4, tmp1);
1776 }
1777
1778 /* dst.y = xmm1 * src.y */
1779 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1780 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp5, tmp1);
1781 }
1782
1783 /* dst.z = xmm1 * src.z */
1784 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1785 dst0[CHAN_Z] = lp_build_mul( &bld->base, tmp6, tmp1);
1786 }
1787
1788 /* dst.w = xmm1 * src.w */
1789 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) && dims == 4) {
1790 dst0[CHAN_W] = lp_build_mul( &bld->base, tmp7, tmp1);
1791 }
1792 }
1793
1794 /* dst.w = 1.0 */
1795 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 3) {
1796 dst0[CHAN_W] = bld->base.one;
1797 }
1798 }
1799 break;
1800
1801 case TGSI_OPCODE_DIV:
1802 /* deprecated */
1803 assert( 0 );
1804 return FALSE;
1805 break;
1806
1807 case TGSI_OPCODE_DP2:
1808 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1809 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1810 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1811 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1812 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1813 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1814 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1815 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1816 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1817 }
1818 break;
1819
1820 case TGSI_OPCODE_TXL:
1821 emit_tex( bld, inst, TEX_MODIFIER_EXPLICIT_LOD, dst0 );
1822 break;
1823
1824 case TGSI_OPCODE_TXP:
1825 emit_tex( bld, inst, TEX_MODIFIER_PROJECTED, dst0 );
1826 break;
1827
1828 case TGSI_OPCODE_BRK:
1829 lp_exec_break(&bld->exec_mask);
1830 break;
1831
1832 case TGSI_OPCODE_IF:
1833 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1834 tmp0 = lp_build_cmp(&bld->base, PIPE_FUNC_NOTEQUAL,
1835 tmp0, bld->base.zero);
1836 lp_exec_mask_cond_push(&bld->exec_mask, tmp0);
1837 break;
1838
1839 case TGSI_OPCODE_BGNLOOP:
1840 lp_exec_bgnloop(&bld->exec_mask);
1841 break;
1842
1843 case TGSI_OPCODE_BGNSUB:
1844 lp_exec_mask_bgnsub(&bld->exec_mask);
1845 break;
1846
1847 case TGSI_OPCODE_ELSE:
1848 lp_exec_mask_cond_invert(&bld->exec_mask);
1849 break;
1850
1851 case TGSI_OPCODE_ENDIF:
1852 lp_exec_mask_cond_pop(&bld->exec_mask);
1853 break;
1854
1855 case TGSI_OPCODE_ENDLOOP:
1856 lp_exec_endloop(&bld->exec_mask);
1857 break;
1858
1859 case TGSI_OPCODE_ENDSUB:
1860 lp_exec_mask_endsub(&bld->exec_mask, pc);
1861 break;
1862
1863 case TGSI_OPCODE_PUSHA:
1864 /* deprecated? */
1865 assert(0);
1866 return FALSE;
1867 break;
1868
1869 case TGSI_OPCODE_POPA:
1870 /* deprecated? */
1871 assert(0);
1872 return FALSE;
1873 break;
1874
1875 case TGSI_OPCODE_CEIL:
1876 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1877 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1878 dst0[chan_index] = lp_build_ceil(&bld->base, tmp0);
1879 }
1880 break;
1881
1882 case TGSI_OPCODE_I2F:
1883 /* deprecated? */
1884 assert(0);
1885 return FALSE;
1886 break;
1887
1888 case TGSI_OPCODE_NOT:
1889 /* deprecated? */
1890 assert(0);
1891 return FALSE;
1892 break;
1893
1894 case TGSI_OPCODE_TRUNC:
1895 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1896 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1897 dst0[chan_index] = lp_build_trunc(&bld->base, tmp0);
1898 }
1899 break;
1900
1901 case TGSI_OPCODE_SHL:
1902 /* deprecated? */
1903 assert(0);
1904 return FALSE;
1905 break;
1906
1907 case TGSI_OPCODE_ISHR:
1908 /* deprecated? */
1909 assert(0);
1910 return FALSE;
1911 break;
1912
1913 case TGSI_OPCODE_AND:
1914 /* deprecated? */
1915 assert(0);
1916 return FALSE;
1917 break;
1918
1919 case TGSI_OPCODE_OR:
1920 /* deprecated? */
1921 assert(0);
1922 return FALSE;
1923 break;
1924
1925 case TGSI_OPCODE_MOD:
1926 /* deprecated? */
1927 assert(0);
1928 return FALSE;
1929 break;
1930
1931 case TGSI_OPCODE_XOR:
1932 /* deprecated? */
1933 assert(0);
1934 return FALSE;
1935 break;
1936
1937 case TGSI_OPCODE_SAD:
1938 /* deprecated? */
1939 assert(0);
1940 return FALSE;
1941 break;
1942
1943 case TGSI_OPCODE_TXF:
1944 /* deprecated? */
1945 assert(0);
1946 return FALSE;
1947 break;
1948
1949 case TGSI_OPCODE_TXQ:
1950 /* deprecated? */
1951 assert(0);
1952 return FALSE;
1953 break;
1954
1955 case TGSI_OPCODE_CONT:
1956 lp_exec_continue(&bld->exec_mask);
1957 break;
1958
1959 case TGSI_OPCODE_EMIT:
1960 return FALSE;
1961 break;
1962
1963 case TGSI_OPCODE_ENDPRIM:
1964 return FALSE;
1965 break;
1966
1967 case TGSI_OPCODE_NOP:
1968 break;
1969
1970 default:
1971 return FALSE;
1972 }
1973
1974 if(info->num_dst) {
1975 LLVMValueRef pred[NUM_CHANNELS];
1976
1977 emit_fetch_predicate( bld, inst, pred );
1978
1979 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1980 emit_store( bld, inst, 0, chan_index, pred[chan_index], dst0[chan_index]);
1981 }
1982 }
1983
1984 return TRUE;
1985 }
1986
1987
1988 void
1989 lp_build_tgsi_soa(LLVMBuilderRef builder,
1990 const struct tgsi_token *tokens,
1991 struct lp_type type,
1992 struct lp_build_mask_context *mask,
1993 LLVMValueRef consts_ptr,
1994 const LLVMValueRef *pos,
1995 const LLVMValueRef (*inputs)[NUM_CHANNELS],
1996 LLVMValueRef (*outputs)[NUM_CHANNELS],
1997 struct lp_build_sampler_soa *sampler,
1998 const struct tgsi_shader_info *info)
1999 {
2000 struct lp_build_tgsi_soa_context bld;
2001 struct tgsi_parse_context parse;
2002 uint num_immediates = 0;
2003 uint num_instructions = 0;
2004 unsigned i;
2005 int pc = 0;
2006
2007 /* Setup build context */
2008 memset(&bld, 0, sizeof bld);
2009 lp_build_context_init(&bld.base, builder, type);
2010 lp_build_context_init(&bld.int_bld, builder, lp_int_type(type));
2011 bld.mask = mask;
2012 bld.pos = pos;
2013 bld.inputs = inputs;
2014 bld.outputs = outputs;
2015 bld.consts_ptr = consts_ptr;
2016 bld.sampler = sampler;
2017 bld.has_indirect_addressing = info->opcode_count[TGSI_OPCODE_ARR] > 0 ||
2018 info->opcode_count[TGSI_OPCODE_ARL] > 0;
2019 bld.instructions = (struct tgsi_full_instruction *)
2020 MALLOC( LP_MAX_INSTRUCTIONS * sizeof(struct tgsi_full_instruction) );
2021 bld.max_instructions = LP_MAX_INSTRUCTIONS;
2022
2023 if (!bld.instructions) {
2024 return;
2025 }
2026
2027 lp_exec_mask_init(&bld.exec_mask, &bld.base);
2028
2029 tgsi_parse_init( &parse, tokens );
2030
2031 while( !tgsi_parse_end_of_tokens( &parse ) ) {
2032 tgsi_parse_token( &parse );
2033
2034 switch( parse.FullToken.Token.Type ) {
2035 case TGSI_TOKEN_TYPE_DECLARATION:
2036 /* Inputs already interpolated */
2037 emit_declaration( &bld, &parse.FullToken.FullDeclaration );
2038 break;
2039
2040 case TGSI_TOKEN_TYPE_INSTRUCTION:
2041 {
2042 /* save expanded instruction */
2043 if (num_instructions == bld.max_instructions) {
2044 bld.instructions = REALLOC(bld.instructions,
2045 bld.max_instructions
2046 * sizeof(struct tgsi_full_instruction),
2047 (bld.max_instructions + LP_MAX_INSTRUCTIONS)
2048 * sizeof(struct tgsi_full_instruction));
2049 bld.max_instructions += LP_MAX_INSTRUCTIONS;
2050 }
2051
2052 memcpy(bld.instructions + num_instructions,
2053 &parse.FullToken.FullInstruction,
2054 sizeof(bld.instructions[0]));
2055
2056 num_instructions++;
2057 }
2058
2059 break;
2060
2061 case TGSI_TOKEN_TYPE_IMMEDIATE:
2062 /* simply copy the immediate values into the next immediates[] slot */
2063 {
2064 const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
2065 assert(size <= 4);
2066 assert(num_immediates < LP_MAX_TGSI_IMMEDIATES);
2067 for( i = 0; i < size; ++i )
2068 bld.immediates[num_immediates][i] =
2069 lp_build_const_vec(type, parse.FullToken.FullImmediate.u[i].Float);
2070 for( i = size; i < 4; ++i )
2071 bld.immediates[num_immediates][i] = bld.base.undef;
2072 num_immediates++;
2073 }
2074 break;
2075
2076 case TGSI_TOKEN_TYPE_PROPERTY:
2077 break;
2078
2079 default:
2080 assert( 0 );
2081 }
2082 }
2083
2084 while (pc != -1) {
2085 struct tgsi_full_instruction *instr = bld.instructions + pc;
2086 const struct tgsi_opcode_info *opcode_info =
2087 tgsi_get_opcode_info(instr->Instruction.Opcode);
2088 if (!emit_instruction( &bld, instr, opcode_info, &pc ))
2089 _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2090 opcode_info->mnemonic);
2091 }
2092
2093 if (0) {
2094 LLVMBasicBlockRef block = LLVMGetInsertBlock(builder);
2095 LLVMValueRef function = LLVMGetBasicBlockParent(block);
2096 debug_printf("11111111111111111111111111111 \n");
2097 tgsi_dump(tokens, 0);
2098 lp_debug_dump_value(function);
2099 debug_printf("2222222222222222222222222222 \n");
2100 }
2101 tgsi_parse_free( &parse );
2102
2103 if (0) {
2104 LLVMModuleRef module = LLVMGetGlobalParent(
2105 LLVMGetBasicBlockParent(LLVMGetInsertBlock(bld.base.builder)));
2106 LLVMDumpModule(module);
2107
2108 }
2109
2110 FREE( bld.instructions );
2111 }
2112