gallivm: implement scatter stores into temp register file
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_tgsi_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * @file
31 * TGSI to LLVM IR translation -- SoA.
32 *
33 * @author Jose Fonseca <jfonseca@vmware.com>
34 *
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
37 */
38
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_info.h"
46 #include "tgsi/tgsi_parse.h"
47 #include "tgsi/tgsi_util.h"
48 #include "tgsi/tgsi_scan.h"
49 #include "lp_bld_type.h"
50 #include "lp_bld_const.h"
51 #include "lp_bld_arit.h"
52 #include "lp_bld_bitarit.h"
53 #include "lp_bld_gather.h"
54 #include "lp_bld_logic.h"
55 #include "lp_bld_swizzle.h"
56 #include "lp_bld_flow.h"
57 #include "lp_bld_quad.h"
58 #include "lp_bld_tgsi.h"
59 #include "lp_bld_limits.h"
60 #include "lp_bld_debug.h"
61
62
/* Iterate CHAN over all four vector components (x, y, z, w). */
#define FOR_EACH_CHANNEL( CHAN )\
   for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)

/* Non-zero if channel CHAN is enabled in the writemask of instruction
 * INST's first destination register. */
#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   ((INST)->Dst[0].Register.WriteMask & (1 << (CHAN)))

#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
   if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))

/* Iterate CHAN over only those channels enabled in DST0's writemask. */
#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
   FOR_EACH_CHANNEL( CHAN )\
   IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )

/* TGSI channel indices */
#define CHAN_X 0
#define CHAN_Y 1
#define CHAN_Z 2
#define CHAN_W 3
#define NUM_CHANNELS 4

/* NOTE(review): appears to be the initial capacity of the
 * bld->instructions array (see max_instructions) -- confirm at the
 * allocation site, which is outside this chunk. */
#define LP_MAX_INSTRUCTIONS 256
83
84
/**
 * Runtime execution-mask state used to emulate structured control flow
 * (IF/ELSE, loops, subroutine calls) with per-lane conditional stores
 * in SoA form.  All masks are integer vectors: ~0 = lane active,
 * 0 = lane inactive.
 */
struct lp_exec_mask {
   struct lp_build_context *bld;

   /* TRUE when any control-flow construct is active, i.e. exec_mask
    * may not be all-ones */
   boolean has_mask;

   /* LLVM integer vector type shared by all the masks below */
   LLVMTypeRef int_vec_type;

   /* saved condition masks for nested IF/ELSE */
   LLVMValueRef cond_stack[LP_MAX_TGSI_NESTING];
   int cond_stack_size;
   LLVMValueRef cond_mask;

   /* current innermost loop state */
   LLVMBasicBlockRef loop_block;
   LLVMValueRef cont_mask;
   LLVMValueRef break_mask;
   /* alloca that carries break_mask across loop iterations */
   LLVMValueRef break_var;
   /* saved loop state for nested loops */
   struct {
      LLVMBasicBlockRef loop_block;
      LLVMValueRef cont_mask;
      LLVMValueRef break_mask;
      LLVMValueRef break_var;
   } loop_stack[LP_MAX_TGSI_NESTING];
   int loop_stack_size;

   /* lanes that executed RET in the current subroutine are cleared here */
   LLVMValueRef ret_mask;
   struct {
      int pc;                   /* return address: instruction index after CAL */
      LLVMValueRef ret_mask;    /* caller's ret_mask, restored on ENDSUB */
   } call_stack[LP_MAX_TGSI_NESTING];
   int call_stack_size;

   /* combined cond/cont/break/ret mask -- recomputed by
    * lp_exec_mask_update() whenever any component changes */
   LLVMValueRef exec_mask;
};
117
/**
 * Context for translating one TGSI shader to LLVM IR in SoA layout.
 * Register files are arrays of per-channel LLVM values or allocas.
 */
struct lp_build_tgsi_soa_context
{
   /* Builder for the float vectors the shader mostly operates on */
   struct lp_build_context base;

   /* Builder for integer masks and indices */
   struct lp_build_context uint_bld;

   /* pointer to the flat constant buffer (array of floats) */
   LLVMValueRef consts_ptr;
   const LLVMValueRef *pos;
   const LLVMValueRef (*inputs)[NUM_CHANNELS];
   /* output registers are allocas, written via masked stores */
   LLVMValueRef (*outputs)[NUM_CHANNELS];

   /* texture sampling code generator; may be NULL (see emit_tex) */
   const struct lp_build_sampler_soa *sampler;

   LLVMValueRef immediates[LP_MAX_TGSI_IMMEDIATES][NUM_CHANNELS];
   LLVMValueRef temps[LP_MAX_TGSI_TEMPS][NUM_CHANNELS];
   LLVMValueRef addr[LP_MAX_TGSI_ADDRS][NUM_CHANNELS];
   LLVMValueRef preds[LP_MAX_TGSI_PREDS][NUM_CHANNELS];

   /* We allocate/use this array of temps if (1 << TGSI_FILE_TEMPORARY) is
    * set in the indirect_files field.
    * The temps[] array above is unused then.
    */
   LLVMValueRef temps_array;

   const struct tgsi_shader_info *info;
   /** bitmask indicating which register files are accessed indirectly */
   unsigned indirect_files;

   /* fragment kill mask (KIL/KILP) */
   struct lp_build_mask_context *mask;
   /* control-flow execution mask */
   struct lp_exec_mask exec_mask;

   /* decoded instructions, needed for function calls and lookahead */
   struct tgsi_full_instruction *instructions;
   uint max_instructions;
};
153
154 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
155 {
156 mask->bld = bld;
157 mask->has_mask = FALSE;
158 mask->cond_stack_size = 0;
159 mask->loop_stack_size = 0;
160 mask->call_stack_size = 0;
161
162 mask->int_vec_type = lp_build_int_vec_type(mask->bld->type);
163 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
164 LLVMConstAllOnes(mask->int_vec_type);
165 }
166
167 static void lp_exec_mask_update(struct lp_exec_mask *mask)
168 {
169 if (mask->loop_stack_size) {
170 /*for loops we need to update the entire mask at runtime */
171 LLVMValueRef tmp;
172 assert(mask->break_mask);
173 tmp = LLVMBuildAnd(mask->bld->builder,
174 mask->cont_mask,
175 mask->break_mask,
176 "maskcb");
177 mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
178 mask->cond_mask,
179 tmp,
180 "maskfull");
181 } else
182 mask->exec_mask = mask->cond_mask;
183
184 if (mask->call_stack_size) {
185 mask->exec_mask = LLVMBuildAnd(mask->bld->builder,
186 mask->exec_mask,
187 mask->ret_mask,
188 "callmask");
189 }
190
191 mask->has_mask = (mask->cond_stack_size > 0 ||
192 mask->loop_stack_size > 0 ||
193 mask->call_stack_size > 0);
194 }
195
196 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
197 LLVMValueRef val)
198 {
199 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
200 if (mask->cond_stack_size == 0) {
201 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
202 }
203 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
204 assert(LLVMTypeOf(val) == mask->int_vec_type);
205 mask->cond_mask = LLVMBuildAnd(mask->bld->builder,
206 mask->cond_mask,
207 val,
208 "");
209 lp_exec_mask_update(mask);
210 }
211
212 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
213 {
214 LLVMValueRef prev_mask;
215 LLVMValueRef inv_mask;
216
217 assert(mask->cond_stack_size);
218 prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
219 if (mask->cond_stack_size == 1) {
220 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
221 }
222
223 inv_mask = LLVMBuildNot(mask->bld->builder, mask->cond_mask, "");
224
225 mask->cond_mask = LLVMBuildAnd(mask->bld->builder,
226 inv_mask,
227 prev_mask, "");
228 lp_exec_mask_update(mask);
229 }
230
231 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
232 {
233 assert(mask->cond_stack_size);
234 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
235 lp_exec_mask_update(mask);
236 }
237
/**
 * Begin a TGSI loop: push the enclosing loop's state, spill break_mask
 * to an alloca (so it survives across iterations), and open a new basic
 * block that lp_exec_endloop() will branch back to.
 */
static void lp_exec_bgnloop(struct lp_exec_mask *mask)
{
   if (mask->loop_stack_size == 0) {
      /* outermost loop: masks must still be in their initial state */
      assert(mask->loop_block == NULL);
      assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
      assert(mask->break_var == NULL);
   }

   assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);

   /* save enclosing loop's state */
   mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
   mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
   mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
   mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
   ++mask->loop_stack_size;

   /* break_mask must persist across iterations, so keep it in memory */
   mask->break_var = lp_build_alloca(mask->bld->builder, mask->int_vec_type, "");
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   /* start the loop body block; ENDLOOP branches back here */
   mask->loop_block = lp_build_insert_new_block(mask->bld->builder, "bgnloop");
   LLVMBuildBr(mask->bld->builder, mask->loop_block);
   LLVMPositionBuilderAtEnd(mask->bld->builder, mask->loop_block);

   /* reload break_mask at the top of each iteration */
   mask->break_mask = LLVMBuildLoad(mask->bld->builder, mask->break_var, "");

   lp_exec_mask_update(mask);
}
266
267 static void lp_exec_break(struct lp_exec_mask *mask)
268 {
269 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
270 mask->exec_mask,
271 "break");
272
273 mask->break_mask = LLVMBuildAnd(mask->bld->builder,
274 mask->break_mask,
275 exec_mask, "break_full");
276
277 lp_exec_mask_update(mask);
278 }
279
280 static void lp_exec_continue(struct lp_exec_mask *mask)
281 {
282 LLVMValueRef exec_mask = LLVMBuildNot(mask->bld->builder,
283 mask->exec_mask,
284 "");
285
286 mask->cont_mask = LLVMBuildAnd(mask->bld->builder,
287 mask->cont_mask,
288 exec_mask, "");
289
290 lp_exec_mask_update(mask);
291 }
292
293
/**
 * End a TGSI loop: branch back to the loop header while any lane is
 * still active, then pop the saved loop state.
 */
static void lp_exec_endloop(struct lp_exec_mask *mask)
{
   LLVMBasicBlockRef endloop;
   /* one wide integer covering the whole mask vector, for a single
    * "any lane set?" compare */
   LLVMTypeRef reg_type = LLVMIntType(mask->bld->type.width*
                                      mask->bld->type.length);
   LLVMValueRef i1cond;

   assert(mask->break_mask);

   /*
    * Restore the cont_mask, but don't pop
    */
   assert(mask->loop_stack_size);
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
   lp_exec_mask_update(mask);

   /*
    * Unlike the continue mask, the break_mask must be preserved across loop
    * iterations
    */
   LLVMBuildStore(mask->bld->builder, mask->break_mask, mask->break_var);

   /* i1cond = (exec_mask != 0), i.e. at least one lane still running */
   i1cond = LLVMBuildICmp(
      mask->bld->builder,
      LLVMIntNE,
      LLVMBuildBitCast(mask->bld->builder, mask->exec_mask, reg_type, ""),
      LLVMConstNull(reg_type), "");

   endloop = lp_build_insert_new_block(mask->bld->builder, "endloop");

   /* loop again while any lane is active, otherwise fall out */
   LLVMBuildCondBr(mask->bld->builder,
                   i1cond, mask->loop_block, endloop);

   LLVMPositionBuilderAtEnd(mask->bld->builder, endloop);

   /* pop the enclosing loop's state saved by lp_exec_bgnloop() */
   assert(mask->loop_stack_size);
   --mask->loop_stack_size;
   mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
   mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
   mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
   mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;

   lp_exec_mask_update(mask);
}
339
340 /* stores val into an address pointed to by dst.
341 * mask->exec_mask is used to figure out which bits of val
342 * should be stored into the address
343 * (0 means don't store this bit, 1 means do store).
344 */
345 static void lp_exec_mask_store(struct lp_exec_mask *mask,
346 LLVMValueRef pred,
347 LLVMValueRef val,
348 LLVMValueRef dst)
349 {
350 /* Mix the predicate and execution mask */
351 if (mask->has_mask) {
352 if (pred) {
353 pred = LLVMBuildAnd(mask->bld->builder, pred, mask->exec_mask, "");
354 } else {
355 pred = mask->exec_mask;
356 }
357 }
358
359 if (pred) {
360 LLVMValueRef real_val, dst_val;
361
362 dst_val = LLVMBuildLoad(mask->bld->builder, dst, "");
363 real_val = lp_build_select(mask->bld,
364 pred,
365 val, dst_val);
366
367 LLVMBuildStore(mask->bld->builder, real_val, dst);
368 } else
369 LLVMBuildStore(mask->bld->builder, val, dst);
370 }
371
372 static void lp_exec_mask_call(struct lp_exec_mask *mask,
373 int func,
374 int *pc)
375 {
376 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
377 mask->call_stack[mask->call_stack_size].pc = *pc;
378 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
379 mask->call_stack_size++;
380 *pc = func;
381 }
382
383 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
384 {
385 LLVMValueRef exec_mask;
386
387 if (mask->call_stack_size == 0) {
388 /* returning from main() */
389 *pc = -1;
390 return;
391 }
392 exec_mask = LLVMBuildNot(mask->bld->builder,
393 mask->exec_mask,
394 "ret");
395
396 mask->ret_mask = LLVMBuildAnd(mask->bld->builder,
397 mask->ret_mask,
398 exec_mask, "ret_full");
399
400 lp_exec_mask_update(mask);
401 }
402
/* Nothing to do at subroutine entry: state is saved by CAL
 * (lp_exec_mask_call) and restored by ENDSUB (lp_exec_mask_endsub). */
static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
{
}
406
407 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
408 {
409 assert(mask->call_stack_size);
410 mask->call_stack_size--;
411 *pc = mask->call_stack[mask->call_stack_size].pc;
412 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
413 lp_exec_mask_update(mask);
414 }
415
416
417 /**
418 * Return pointer to a temporary register channel (src or dest).
419 * Note that indirect addressing cannot be handled here.
420 * \param index which temporary register
421 * \param chan which channel of the temp register.
422 */
423 static LLVMValueRef
424 get_temp_ptr(struct lp_build_tgsi_soa_context *bld,
425 unsigned index,
426 unsigned chan)
427 {
428 assert(chan < 4);
429 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
430 LLVMValueRef lindex = lp_build_const_int32(index * 4 + chan);
431 return LLVMBuildGEP(bld->base.builder, bld->temps_array, &lindex, 1, "");
432 }
433 else {
434 return bld->temps[index][chan];
435 }
436 }
437
438
439 /**
440 * Gather vector.
441 * XXX the lp_build_gather() function should be capable of doing this
442 * with a little work.
443 */
444 static LLVMValueRef
445 build_gather(struct lp_build_tgsi_soa_context *bld,
446 LLVMValueRef base_ptr,
447 LLVMValueRef indexes)
448 {
449 LLVMValueRef res = bld->base.undef;
450 unsigned i;
451
452 /*
453 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
454 */
455 for (i = 0; i < bld->base.type.length; i++) {
456 LLVMValueRef ii = LLVMConstInt(LLVMInt32Type(), i, 0);
457 LLVMValueRef index = LLVMBuildExtractElement(bld->base.builder,
458 indexes, ii, "");
459 LLVMValueRef scalar_ptr = LLVMBuildGEP(bld->base.builder, base_ptr,
460 &index, 1, "");
461 LLVMValueRef scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");
462
463 res = LLVMBuildInsertElement(bld->base.builder, res, scalar, ii, "");
464 }
465
466 return res;
467 }
468
469
470 /**
471 * Scatter/store vector.
472 */
473 static void
474 build_scatter(struct lp_build_tgsi_soa_context *bld,
475 LLVMValueRef base_ptr,
476 LLVMValueRef indexes,
477 LLVMValueRef values)
478 {
479 LLVMBuilderRef builder = bld->base.builder;
480 unsigned i;
481
482 /*
483 * Loop over elements of index_vec, store scalar value.
484 */
485 for (i = 0; i < bld->base.type.length; i++) {
486 LLVMValueRef ii = LLVMConstInt(LLVMInt32Type(), i, 0);
487 LLVMValueRef index = LLVMBuildExtractElement(builder, indexes, ii, "");
488 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr, &index, 1, "scatter_ptr");
489 LLVMValueRef val = LLVMBuildExtractElement(builder, values, ii, "scatter_val");
490
491 LLVMBuildStore(builder, val, scalar_ptr);
492 }
493 }
494
495
496 /**
497 * Read the current value of the ADDR register, convert the floats to
498 * ints, add the base index and return the vector of offsets.
499 * The offsets will be used to index into the constant buffer or
500 * temporary register file.
501 */
502 static LLVMValueRef
503 get_indirect_index(struct lp_build_tgsi_soa_context *bld,
504 unsigned reg_file, unsigned reg_index,
505 const struct tgsi_src_register *indirect_reg)
506 {
507 struct lp_build_context *uint_bld = &bld->uint_bld;
508 /* always use X component of address register */
509 unsigned swizzle = indirect_reg->SwizzleX;
510 LLVMValueRef base;
511 LLVMValueRef rel;
512 LLVMValueRef max_index;
513 LLVMValueRef index;
514
515 assert(bld->indirect_files & (1 << reg_file));
516
517 base = lp_build_const_int_vec(uint_bld->type, reg_index);
518
519 assert(swizzle < 4);
520 rel = LLVMBuildLoad(bld->base.builder,
521 bld->addr[indirect_reg->Index][swizzle],
522 "load addr reg");
523
524 /* for indexing we want integers */
525 rel = LLVMBuildFPToSI(bld->base.builder,
526 rel,
527 uint_bld->vec_type, "");
528
529 index = lp_build_add(uint_bld, base, rel);
530
531 max_index = lp_build_const_int_vec(uint_bld->type,
532 bld->info->file_max[reg_file]);
533
534 assert(!uint_bld->type.sign);
535 index = lp_build_min(uint_bld, index, max_index);
536
537 return index;
538 }
539
540
/**
 * Register fetch: load one channel of source operand src_op as a vector,
 * applying swizzle, indirect addressing, and the TGSI sign mode
 * (abs / negate).
 */
static LLVMValueRef
emit_fetch(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   unsigned src_op,
   const unsigned chan_index )
{
   struct lp_build_context *uint_bld = &bld->uint_bld;
   const struct tgsi_full_src_register *reg = &inst->Src[src_op];
   const unsigned swizzle =
      tgsi_util_get_full_src_register_swizzle(reg, chan_index);
   LLVMValueRef res;
   LLVMValueRef indirect_index = NULL;

   if (swizzle > 3) {
      assert(0 && "invalid swizzle in emit_fetch()");
      return bld->base.undef;
   }

   if (reg->Register.Indirect) {
      /* per-lane register index = reg_index + int(ADDR.x), clamped */
      indirect_index = get_indirect_index(bld,
                                          reg->Register.File,
                                          reg->Register.Index,
                                          &reg->Indirect);
   } else {
      assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
   }

   switch (reg->Register.File) {
   case TGSI_FILE_CONSTANT:
      if (reg->Register.Indirect) {
         LLVMValueRef swizzle_vec =
            lp_build_const_int_vec(uint_bld->type, swizzle);
         LLVMValueRef index_vec;  /* index into the const buffer */

         /* index_vec = indirect_index * 4 + swizzle */
         index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
         index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);

         /* Gather values from the constant buffer */
         res = build_gather(bld, bld->consts_ptr, index_vec);
      }
      else {
         /* same constant for all lanes: scalar load + broadcast */
         LLVMValueRef index;  /* index into the const buffer */
         LLVMValueRef scalar, scalar_ptr;

         index = lp_build_const_int32(reg->Register.Index*4 + swizzle);

         scalar_ptr = LLVMBuildGEP(bld->base.builder, bld->consts_ptr,
                                   &index, 1, "");
         scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");

         res = lp_build_broadcast_scalar(&bld->base, scalar);
      }
      break;

   case TGSI_FILE_IMMEDIATE:
      res = bld->immediates[reg->Register.Index][swizzle];
      assert(res);
      break;

   case TGSI_FILE_INPUT:
      res = bld->inputs[reg->Register.Index][swizzle];
      assert(res);
      break;

   case TGSI_FILE_TEMPORARY:
      if (reg->Register.Indirect) {
         LLVMValueRef swizzle_vec =
            lp_build_const_int_vec(uint_bld->type, swizzle);
         LLVMValueRef length_vec =
            lp_build_const_int_vec(uint_bld->type, bld->base.type.length);
         LLVMValueRef index_vec;  /* index into the temp register array */
         LLVMValueRef temps_array;
         LLVMTypeRef float4_ptr_type;

         /* index_vec = (indirect_index * 4 + swizzle) * length;
          * the extra * length converts a vector index into a float index,
          * since temps_array is viewed as a flat float array below */
         index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
         index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
         index_vec = lp_build_mul(uint_bld, index_vec, length_vec);

         /* cast temps_array pointer to float* */
         float4_ptr_type = LLVMPointerType(LLVMFloatType(), 0);
         temps_array = LLVMBuildBitCast(uint_bld->builder, bld->temps_array,
                                        float4_ptr_type, "");

         /* Gather values from the temporary register array */
         res = build_gather(bld, temps_array, index_vec);
      }
      else {
         LLVMValueRef temp_ptr;
         temp_ptr = get_temp_ptr(bld, reg->Register.Index, swizzle);
         res = LLVMBuildLoad(bld->base.builder, temp_ptr, "");
         if (!res)
            return bld->base.undef;
      }
      break;

   default:
      assert(0 && "invalid src register in emit_fetch()");
      return bld->base.undef;
   }

   /* apply the operand's sign mode (e.g. "-|src|", "|src|") */
   switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
   case TGSI_UTIL_SIGN_CLEAR:
      res = lp_build_abs( &bld->base, res );
      break;

   case TGSI_UTIL_SIGN_SET:
      res = lp_build_abs( &bld->base, res );
      /* fall through */
   case TGSI_UTIL_SIGN_TOGGLE:
      res = lp_build_negate( &bld->base, res );
      break;

   case TGSI_UTIL_SIGN_KEEP:
      break;
   }

   return res;
}
665
666
667 /**
668 * Register fetch with derivatives.
669 */
670 static void
671 emit_fetch_deriv(
672 struct lp_build_tgsi_soa_context *bld,
673 const struct tgsi_full_instruction *inst,
674 unsigned index,
675 const unsigned chan_index,
676 LLVMValueRef *res,
677 LLVMValueRef *ddx,
678 LLVMValueRef *ddy)
679 {
680 LLVMValueRef src;
681
682 src = emit_fetch(bld, inst, index, chan_index);
683
684 if(res)
685 *res = src;
686
687 /* TODO: use interpolation coeffs for inputs */
688
689 if(ddx)
690 *ddx = lp_build_ddx(&bld->base, src);
691
692 if(ddy)
693 *ddy = lp_build_ddy(&bld->base, src);
694 }
695
696
/**
 * Fetch the instruction's predicate as four per-channel integer masks
 * (~0 = write the lane, 0 = skip it).  pred[chan] is set to NULL when
 * the instruction is not predicated.
 */
static void
emit_fetch_predicate(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   LLVMValueRef *pred)
{
   unsigned index;
   unsigned char swizzles[4];
   LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
   LLVMValueRef value;
   unsigned chan;

   if (!inst->Instruction.Predicate) {
      /* unpredicated: NULL means "store unconditionally" downstream */
      FOR_EACH_CHANNEL( chan ) {
         pred[chan] = NULL;
      }
      return;
   }

   swizzles[0] = inst->Predicate.SwizzleX;
   swizzles[1] = inst->Predicate.SwizzleY;
   swizzles[2] = inst->Predicate.SwizzleZ;
   swizzles[3] = inst->Predicate.SwizzleW;

   index = inst->Predicate.Index;
   assert(index < LP_MAX_TGSI_PREDS);

   FOR_EACH_CHANNEL( chan ) {
      unsigned swizzle = swizzles[chan];

      /*
       * Only fetch the predicate register channels that are actually listed
       * in the swizzles
       */
      if (!unswizzled[swizzle]) {
         value = LLVMBuildLoad(bld->base.builder,
                               bld->preds[index][swizzle], "");

         /*
          * Convert the value to an integer mask.
          *
          * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
          * is needlessly causing two comparisons due to storing the intermediate
          * result as float vector instead of an integer mask vector.
          */
         value = lp_build_compare(bld->base.builder,
                                  bld->base.type,
                                  PIPE_FUNC_NOTEQUAL,
                                  value,
                                  bld->base.zero);
         if (inst->Predicate.Negate) {
            value = LLVMBuildNot(bld->base.builder, value, "");
         }

         /* cache so a repeated swizzle reuses the same mask */
         unswizzled[swizzle] = value;
      } else {
         value = unswizzled[swizzle];
      }

      pred[chan] = value;
   }
}
762
763
/**
 * Register store: write one channel of destination register "index",
 * applying saturation, the execution mask and the per-channel predicate.
 * Indirectly addressed temporaries go through a scatter store.
 */
static void
emit_store(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   unsigned index,
   unsigned chan_index,
   LLVMValueRef pred,
   LLVMValueRef value)
{
   const struct tgsi_full_dst_register *reg = &inst->Dst[index];
   struct lp_build_context *uint_bld = &bld->uint_bld;
   LLVMValueRef indirect_index = NULL;

   /* apply the instruction's saturate modifier first */
   switch( inst->Instruction.Saturate ) {
   case TGSI_SAT_NONE:
      break;

   case TGSI_SAT_ZERO_ONE:
      value = lp_build_max(&bld->base, value, bld->base.zero);
      value = lp_build_min(&bld->base, value, bld->base.one);
      break;

   case TGSI_SAT_MINUS_PLUS_ONE:
      value = lp_build_max(&bld->base, value, lp_build_const_vec(bld->base.type, -1.0));
      value = lp_build_min(&bld->base, value, bld->base.one);
      break;

   default:
      assert(0);
   }

   if (reg->Register.Indirect) {
      indirect_index = get_indirect_index(bld,
                                          reg->Register.File,
                                          reg->Register.Index,
                                          &reg->Indirect);
   } else {
      assert(reg->Register.Index <= bld->info->file_max[reg->Register.File]);
   }

   switch( reg->Register.File ) {
   case TGSI_FILE_OUTPUT:
      lp_exec_mask_store(&bld->exec_mask, pred, value,
                         bld->outputs[reg->Register.Index][chan_index]);
      break;

   case TGSI_FILE_TEMPORARY:
      if (reg->Register.Indirect) {
         /* scatter the vector into the flat temps array */
         LLVMValueRef chan_vec =
            lp_build_const_int_vec(uint_bld->type, chan_index);
         LLVMValueRef length_vec =
            lp_build_const_int_vec(uint_bld->type, bld->base.type.length);
         LLVMValueRef index_vec;  /* indexes into the temp registers */
         LLVMValueRef temps_array;
         LLVMTypeRef float_ptr_type;

         /* index_vec = (indirect_index * 4 + chan_index) * length */
         index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
         index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
         index_vec = lp_build_mul(uint_bld, index_vec, length_vec);

         /* view temps_array as a flat float array for scalar stores */
         float_ptr_type = LLVMPointerType(LLVMFloatType(), 0);
         temps_array = LLVMBuildBitCast(bld->base.builder, bld->temps_array,
                                        float_ptr_type, "");

         /* Scatter store values into temp registers */
         build_scatter(bld, temps_array, index_vec, value);
      }
      else {
         LLVMValueRef temp_ptr = get_temp_ptr(bld, reg->Register.Index,
                                              chan_index);
         lp_exec_mask_store(&bld->exec_mask, pred, value, temp_ptr);
      }
      break;

   case TGSI_FILE_ADDRESS:
      /* NOTE(review): indexes with reg->Indirect.Index rather than
       * reg->Register.Index like the other cases -- confirm this is
       * intentional and not a copy-paste slip. */
      lp_exec_mask_store(&bld->exec_mask, pred, value,
                         bld->addr[reg->Indirect.Index][chan_index]);
      break;

   case TGSI_FILE_PREDICATE:
      lp_exec_mask_store(&bld->exec_mask, pred, value,
                         bld->preds[reg->Register.Index][chan_index]);
      break;

   default:
      assert( 0 );
   }
}
856
857
/**
 * High-level instruction translators.
 */

/**
 * Translate a TGSI texture-sampling instruction (TEX/TXP/TXB/TXL/TXD).
 * Fetches coordinates (and derivatives / lod / projection as requested
 * by "modifier") and hands them to the sampler code generator, which
 * writes the four result channels into texel[].
 */
static void
emit_tex( struct lp_build_tgsi_soa_context *bld,
          const struct tgsi_full_instruction *inst,
          enum lp_build_tex_modifier modifier,
          LLVMValueRef *texel)
{
   unsigned unit;
   LLVMValueRef lod_bias, explicit_lod;
   LLVMValueRef oow = NULL;
   LLVMValueRef coords[3];
   LLVMValueRef ddx[3];
   LLVMValueRef ddy[3];
   unsigned num_coords;
   unsigned i;

   if (!bld->sampler) {
      /* no sampler generator: return undef rather than crash */
      _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
      for (i = 0; i < 4; i++) {
         texel[i] = bld->base.undef;
      }
      return;
   }

   /* number of coordinates consumed by the texture target */
   switch (inst->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      num_coords = 1;
      break;
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      num_coords = 2;
      break;
   case TGSI_TEXTURE_SHADOW1D:
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      num_coords = 3;
      break;
   default:
      assert(0);
      return;
   }

   /* lod bias / explicit lod live in the W channel of src 0 */
   if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
      lod_bias = emit_fetch( bld, inst, 0, 3 );
      explicit_lod = NULL;
   }
   else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
      lod_bias = NULL;
      explicit_lod = emit_fetch( bld, inst, 0, 3 );
   }
   else {
      lod_bias = NULL;
      explicit_lod = NULL;
   }

   /* projective texturing: divide coords by W (multiply by 1/W) */
   if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
      oow = emit_fetch( bld, inst, 0, 3 );
      oow = lp_build_rcp(&bld->base, oow);
   }

   for (i = 0; i < num_coords; i++) {
      coords[i] = emit_fetch( bld, inst, 0, i );
      if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
         coords[i] = lp_build_mul(&bld->base, coords[i], oow);
   }
   for (i = num_coords; i < 3; i++) {
      coords[i] = bld->base.undef;
   }

   if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
      /* TXD: derivatives come from src 1 (ddx) and src 2 (ddy);
       * only the first element of each vector is used */
      LLVMTypeRef i32t = LLVMInt32Type();
      LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
      for (i = 0; i < num_coords; i++) {
         LLVMValueRef src1 = emit_fetch( bld, inst, 1, i );
         LLVMValueRef src2 = emit_fetch( bld, inst, 2, i );
         ddx[i] = LLVMBuildExtractElement(bld->base.builder, src1, index0, "");
         ddy[i] = LLVMBuildExtractElement(bld->base.builder, src2, index0, "");
      }
      unit = inst->Src[3].Register.Index;
   } else {
      /* implicit derivatives from neighbouring fragments */
      for (i = 0; i < num_coords; i++) {
         ddx[i] = lp_build_scalar_ddx( &bld->base, coords[i] );
         ddy[i] = lp_build_scalar_ddy( &bld->base, coords[i] );
      }
      unit = inst->Src[1].Register.Index;
   }
   for (i = num_coords; i < 3; i++) {
      ddx[i] = LLVMGetUndef(bld->base.elem_type);
      ddy[i] = LLVMGetUndef(bld->base.elem_type);
   }

   bld->sampler->emit_fetch_texel(bld->sampler,
                                  bld->base.builder,
                                  bld->base.type,
                                  unit, num_coords, coords,
                                  ddx, ddy,
                                  lod_bias, explicit_lod,
                                  texel);
}
962
963 static boolean
964 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
965 int pc)
966 {
967 int i;
968
969 for (i = 0; i < 5; i++) {
970 unsigned opcode;
971
972 if (pc + i >= bld->info->num_instructions)
973 return TRUE;
974
975 opcode = bld->instructions[pc + i].Instruction.Opcode;
976
977 if (opcode == TGSI_OPCODE_END)
978 return TRUE;
979
980 if (opcode == TGSI_OPCODE_TEX ||
981 opcode == TGSI_OPCODE_TXP ||
982 opcode == TGSI_OPCODE_TXD ||
983 opcode == TGSI_OPCODE_TXB ||
984 opcode == TGSI_OPCODE_TXL ||
985 opcode == TGSI_OPCODE_TXF ||
986 opcode == TGSI_OPCODE_TXQ ||
987 opcode == TGSI_OPCODE_CAL ||
988 opcode == TGSI_OPCODE_CALLNZ ||
989 opcode == TGSI_OPCODE_IF ||
990 opcode == TGSI_OPCODE_IFC ||
991 opcode == TGSI_OPCODE_BGNLOOP ||
992 opcode == TGSI_OPCODE_SWITCH)
993 return FALSE;
994 }
995
996 return TRUE;
997 }
998
999
1000
/**
 * Kill fragment if any of the src register values are negative.
 * Builds a combined "keep" mask over the referenced channels and folds
 * it into the fragment kill mask.
 */
static void
emit_kil(
   struct lp_build_tgsi_soa_context *bld,
   const struct tgsi_full_instruction *inst,
   int pc)
{
   const struct tgsi_full_src_register *reg = &inst->Src[0];
   LLVMValueRef terms[NUM_CHANNELS];
   LLVMValueRef mask;
   unsigned chan_index;

   memset(&terms, 0, sizeof terms);

   FOR_EACH_CHANNEL( chan_index ) {
      unsigned swizzle;

      /* Unswizzle channel */
      swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );

      /* Check if the component has not been already tested. */
      assert(swizzle < NUM_CHANNELS);
      if( !terms[swizzle] )
         /* TODO: change the comparison operator instead of setting the sign */
         terms[swizzle] = emit_fetch(bld, inst, 0, chan_index );
   }

   /* AND together the per-channel "keep" masks */
   mask = NULL;
   FOR_EACH_CHANNEL( chan_index ) {
      if(terms[chan_index]) {
         LLVMValueRef chan_mask;

         /*
          * If term < 0 then mask = 0 else mask = ~0.
          */
         chan_mask = lp_build_cmp(&bld->base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->base.zero);

         if(mask)
            mask = LLVMBuildAnd(bld->base.builder, mask, chan_mask, "");
         else
            mask = chan_mask;
      }
   }

   if(mask) {
      lp_build_mask_update(bld->mask, mask);

      /* skip the early-out check if the shader ends soon anyway */
      if (!near_end_of_shader(bld, pc))
         lp_build_mask_check(bld->mask);
   }
}
1054
1055
1056 /**
1057 * Predicated fragment kill.
1058 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1059 * The only predication is the execution mask which will apply if
1060 * we're inside a loop or conditional.
1061 */
1062 static void
1063 emit_kilp(struct lp_build_tgsi_soa_context *bld,
1064 const struct tgsi_full_instruction *inst,
1065 int pc)
1066 {
1067 LLVMValueRef mask;
1068
1069 /* For those channels which are "alive", disable fragment shader
1070 * execution.
1071 */
1072 if (bld->exec_mask.has_mask) {
1073 mask = LLVMBuildNot(bld->base.builder, bld->exec_mask.exec_mask, "kilp");
1074 }
1075 else {
1076 LLVMValueRef zero = LLVMConstNull(bld->base.int_vec_type);
1077 mask = zero;
1078 }
1079
1080 lp_build_mask_update(bld->mask, mask);
1081
1082 if (!near_end_of_shader(bld, pc))
1083 lp_build_mask_check(bld->mask);
1084 }
1085
1086 static void
1087 emit_declaration(
1088 struct lp_build_tgsi_soa_context *bld,
1089 const struct tgsi_full_declaration *decl)
1090 {
1091 LLVMTypeRef vec_type = bld->base.vec_type;
1092
1093 unsigned first = decl->Range.First;
1094 unsigned last = decl->Range.Last;
1095 unsigned idx, i;
1096
1097 for (idx = first; idx <= last; ++idx) {
1098 assert(last <= bld->info->file_max[decl->Declaration.File]);
1099 switch (decl->Declaration.File) {
1100 case TGSI_FILE_TEMPORARY:
1101 assert(idx < LP_MAX_TGSI_TEMPS);
1102 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
1103 LLVMValueRef array_size = LLVMConstInt(LLVMInt32Type(),
1104 last*4 + 4, 0);
1105 bld->temps_array = lp_build_array_alloca(bld->base.builder,
1106 vec_type, array_size, "");
1107 } else {
1108 for (i = 0; i < NUM_CHANNELS; i++)
1109 bld->temps[idx][i] = lp_build_alloca(bld->base.builder,
1110 vec_type, "");
1111 }
1112 break;
1113
1114 case TGSI_FILE_OUTPUT:
1115 for (i = 0; i < NUM_CHANNELS; i++)
1116 bld->outputs[idx][i] = lp_build_alloca(bld->base.builder,
1117 vec_type, "");
1118 break;
1119
1120 case TGSI_FILE_ADDRESS:
1121 assert(idx < LP_MAX_TGSI_ADDRS);
1122 for (i = 0; i < NUM_CHANNELS; i++)
1123 bld->addr[idx][i] = lp_build_alloca(bld->base.builder,
1124 vec_type, "");
1125 break;
1126
1127 case TGSI_FILE_PREDICATE:
1128 assert(idx < LP_MAX_TGSI_PREDS);
1129 for (i = 0; i < NUM_CHANNELS; i++)
1130 bld->preds[idx][i] = lp_build_alloca(bld->base.builder,
1131 vec_type, "");
1132 break;
1133
1134 default:
1135 /* don't need to declare other vars */
1136 break;
1137 }
1138 }
1139 }
1140
1141
1142 /**
1143 * Emit LLVM for one TGSI instruction.
1144 * \param return TRUE for success, FALSE otherwise
1145 */
1146 static boolean
1147 emit_instruction(
1148 struct lp_build_tgsi_soa_context *bld,
1149 const struct tgsi_full_instruction *inst,
1150 const struct tgsi_opcode_info *info,
1151 int *pc)
1152 {
1153 unsigned chan_index;
1154 LLVMValueRef src0, src1, src2;
1155 LLVMValueRef tmp0, tmp1, tmp2;
1156 LLVMValueRef tmp3 = NULL;
1157 LLVMValueRef tmp4 = NULL;
1158 LLVMValueRef tmp5 = NULL;
1159 LLVMValueRef tmp6 = NULL;
1160 LLVMValueRef tmp7 = NULL;
1161 LLVMValueRef res;
1162 LLVMValueRef dst0[NUM_CHANNELS];
1163
1164 /*
1165 * Stores and write masks are handled in a general fashion after the long
1166 * instruction opcode switch statement.
1167 *
1168 * Although not stricitly necessary, we avoid generating instructions for
1169 * channels which won't be stored, in cases where's that easy. For some
1170 * complex instructions, like texture sampling, it is more convenient to
1171 * assume a full writemask and then let LLVM optimization passes eliminate
1172 * redundant code.
1173 */
1174
1175 (*pc)++;
1176
1177 assert(info->num_dst <= 1);
1178 if (info->num_dst) {
1179 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1180 dst0[chan_index] = bld->base.undef;
1181 }
1182 }
1183
1184 switch (inst->Instruction.Opcode) {
1185 case TGSI_OPCODE_ARL:
1186 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1187 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1188 tmp0 = lp_build_floor(&bld->base, tmp0);
1189 dst0[chan_index] = tmp0;
1190 }
1191 break;
1192
1193 case TGSI_OPCODE_MOV:
1194 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1195 dst0[chan_index] = emit_fetch( bld, inst, 0, chan_index );
1196 }
1197 break;
1198
1199 case TGSI_OPCODE_LIT:
1200 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ) {
1201 dst0[CHAN_X] = bld->base.one;
1202 }
1203 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1204 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1205 dst0[CHAN_Y] = lp_build_max( &bld->base, src0, bld->base.zero);
1206 }
1207 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1208 /* XMM[1] = SrcReg[0].yyyy */
1209 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1210 /* XMM[1] = max(XMM[1], 0) */
1211 tmp1 = lp_build_max( &bld->base, tmp1, bld->base.zero);
1212 /* XMM[2] = SrcReg[0].wwww */
1213 tmp2 = emit_fetch( bld, inst, 0, CHAN_W );
1214 tmp1 = lp_build_pow( &bld->base, tmp1, tmp2);
1215 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1216 tmp2 = lp_build_cmp(&bld->base, PIPE_FUNC_GREATER, tmp0, bld->base.zero);
1217 dst0[CHAN_Z] = lp_build_select(&bld->base, tmp2, tmp1, bld->base.zero);
1218 }
1219 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) ) {
1220 dst0[CHAN_W] = bld->base.one;
1221 }
1222 break;
1223
1224 case TGSI_OPCODE_RCP:
1225 /* TGSI_OPCODE_RECIP */
1226 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1227 res = lp_build_rcp(&bld->base, src0);
1228 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1229 dst0[chan_index] = res;
1230 }
1231 break;
1232
1233 case TGSI_OPCODE_RSQ:
1234 /* TGSI_OPCODE_RECIPSQRT */
1235 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1236 src0 = lp_build_abs(&bld->base, src0);
1237 res = lp_build_rsqrt(&bld->base, src0);
1238 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1239 dst0[chan_index] = res;
1240 }
1241 break;
1242
1243 case TGSI_OPCODE_EXP:
1244 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1245 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1246 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1247 LLVMValueRef *p_exp2_int_part = NULL;
1248 LLVMValueRef *p_frac_part = NULL;
1249 LLVMValueRef *p_exp2 = NULL;
1250
1251 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1252
1253 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1254 p_exp2_int_part = &tmp0;
1255 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1256 p_frac_part = &tmp1;
1257 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1258 p_exp2 = &tmp2;
1259
1260 lp_build_exp2_approx(&bld->base, src0, p_exp2_int_part, p_frac_part, p_exp2);
1261
1262 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1263 dst0[CHAN_X] = tmp0;
1264 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1265 dst0[CHAN_Y] = tmp1;
1266 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1267 dst0[CHAN_Z] = tmp2;
1268 }
1269 /* dst.w = 1.0 */
1270 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1271 dst0[CHAN_W] = bld->base.one;
1272 }
1273 break;
1274
1275 case TGSI_OPCODE_LOG:
1276 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1277 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1278 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
1279 LLVMValueRef *p_floor_log2 = NULL;
1280 LLVMValueRef *p_exp = NULL;
1281 LLVMValueRef *p_log2 = NULL;
1282
1283 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1284 src0 = lp_build_abs( &bld->base, src0 );
1285
1286 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1287 p_floor_log2 = &tmp0;
1288 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
1289 p_exp = &tmp1;
1290 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1291 p_log2 = &tmp2;
1292
1293 lp_build_log2_approx(&bld->base, src0, p_exp, p_floor_log2, p_log2);
1294
1295 /* dst.x = floor(lg2(abs(src.x))) */
1296 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
1297 dst0[CHAN_X] = tmp0;
1298 /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
1299 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y )) {
1300 dst0[CHAN_Y] = lp_build_div( &bld->base, src0, tmp1);
1301 }
1302 /* dst.z = lg2(abs(src.x)) */
1303 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
1304 dst0[CHAN_Z] = tmp2;
1305 }
1306 /* dst.w = 1.0 */
1307 if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
1308 dst0[CHAN_W] = bld->base.one;
1309 }
1310 break;
1311
1312 case TGSI_OPCODE_MUL:
1313 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1314 src0 = emit_fetch( bld, inst, 0, chan_index );
1315 src1 = emit_fetch( bld, inst, 1, chan_index );
1316 dst0[chan_index] = lp_build_mul(&bld->base, src0, src1);
1317 }
1318 break;
1319
1320 case TGSI_OPCODE_ADD:
1321 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1322 src0 = emit_fetch( bld, inst, 0, chan_index );
1323 src1 = emit_fetch( bld, inst, 1, chan_index );
1324 dst0[chan_index] = lp_build_add(&bld->base, src0, src1);
1325 }
1326 break;
1327
1328 case TGSI_OPCODE_DP3:
1329 /* TGSI_OPCODE_DOT3 */
1330 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1331 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1332 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1333 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1334 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1335 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1336 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1337 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1338 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1339 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1340 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1341 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1342 dst0[chan_index] = tmp0;
1343 }
1344 break;
1345
1346 case TGSI_OPCODE_DP4:
1347 /* TGSI_OPCODE_DOT4 */
1348 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1349 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1350 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1351 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1352 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1353 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1354 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1355 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1356 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1357 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1358 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1359 tmp1 = emit_fetch( bld, inst, 0, CHAN_W );
1360 tmp2 = emit_fetch( bld, inst, 1, CHAN_W );
1361 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1362 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1363 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1364 dst0[chan_index] = tmp0;
1365 }
1366 break;
1367
1368 case TGSI_OPCODE_DST:
1369 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1370 dst0[CHAN_X] = bld->base.one;
1371 }
1372 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1373 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1374 tmp1 = emit_fetch( bld, inst, 1, CHAN_Y );
1375 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp0, tmp1);
1376 }
1377 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1378 dst0[CHAN_Z] = emit_fetch( bld, inst, 0, CHAN_Z );
1379 }
1380 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1381 dst0[CHAN_W] = emit_fetch( bld, inst, 1, CHAN_W );
1382 }
1383 break;
1384
1385 case TGSI_OPCODE_MIN:
1386 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1387 src0 = emit_fetch( bld, inst, 0, chan_index );
1388 src1 = emit_fetch( bld, inst, 1, chan_index );
1389 dst0[chan_index] = lp_build_min( &bld->base, src0, src1 );
1390 }
1391 break;
1392
1393 case TGSI_OPCODE_MAX:
1394 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1395 src0 = emit_fetch( bld, inst, 0, chan_index );
1396 src1 = emit_fetch( bld, inst, 1, chan_index );
1397 dst0[chan_index] = lp_build_max( &bld->base, src0, src1 );
1398 }
1399 break;
1400
1401 case TGSI_OPCODE_SLT:
1402 /* TGSI_OPCODE_SETLT */
1403 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1404 src0 = emit_fetch( bld, inst, 0, chan_index );
1405 src1 = emit_fetch( bld, inst, 1, chan_index );
1406 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, src1 );
1407 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1408 }
1409 break;
1410
1411 case TGSI_OPCODE_SGE:
1412 /* TGSI_OPCODE_SETGE */
1413 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1414 src0 = emit_fetch( bld, inst, 0, chan_index );
1415 src1 = emit_fetch( bld, inst, 1, chan_index );
1416 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GEQUAL, src0, src1 );
1417 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1418 }
1419 break;
1420
1421 case TGSI_OPCODE_MAD:
1422 /* TGSI_OPCODE_MADD */
1423 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1424 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1425 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1426 tmp2 = emit_fetch( bld, inst, 2, chan_index );
1427 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1428 tmp0 = lp_build_add( &bld->base, tmp0, tmp2);
1429 dst0[chan_index] = tmp0;
1430 }
1431 break;
1432
1433 case TGSI_OPCODE_SUB:
1434 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1435 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1436 tmp1 = emit_fetch( bld, inst, 1, chan_index );
1437 dst0[chan_index] = lp_build_sub( &bld->base, tmp0, tmp1);
1438 }
1439 break;
1440
1441 case TGSI_OPCODE_LRP:
1442 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1443 src0 = emit_fetch( bld, inst, 0, chan_index );
1444 src1 = emit_fetch( bld, inst, 1, chan_index );
1445 src2 = emit_fetch( bld, inst, 2, chan_index );
1446 tmp0 = lp_build_sub( &bld->base, src1, src2 );
1447 tmp0 = lp_build_mul( &bld->base, src0, tmp0 );
1448 dst0[chan_index] = lp_build_add( &bld->base, tmp0, src2 );
1449 }
1450 break;
1451
1452 case TGSI_OPCODE_CND:
1453 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1454 src0 = emit_fetch( bld, inst, 0, chan_index );
1455 src1 = emit_fetch( bld, inst, 1, chan_index );
1456 src2 = emit_fetch( bld, inst, 2, chan_index );
1457 tmp1 = lp_build_const_vec(bld->base.type, 0.5);
1458 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src2, tmp1);
1459 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src0, src1 );
1460 }
1461 break;
1462
1463 case TGSI_OPCODE_DP2A:
1464 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1465 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1466 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1467 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1468 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1469 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1470 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1471 tmp1 = emit_fetch( bld, inst, 2, CHAN_X ); /* xmm1 = src[2].x */
1472 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1473 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1474 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1475 }
1476 break;
1477
1478 case TGSI_OPCODE_FRC:
1479 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1480 src0 = emit_fetch( bld, inst, 0, chan_index );
1481 tmp0 = lp_build_floor(&bld->base, src0);
1482 tmp0 = lp_build_sub(&bld->base, src0, tmp0);
1483 dst0[chan_index] = tmp0;
1484 }
1485 break;
1486
1487 case TGSI_OPCODE_CLAMP:
1488 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1489 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1490 src1 = emit_fetch( bld, inst, 1, chan_index );
1491 src2 = emit_fetch( bld, inst, 2, chan_index );
1492 tmp0 = lp_build_max(&bld->base, tmp0, src1);
1493 tmp0 = lp_build_min(&bld->base, tmp0, src2);
1494 dst0[chan_index] = tmp0;
1495 }
1496 break;
1497
1498 case TGSI_OPCODE_FLR:
1499 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1500 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1501 dst0[chan_index] = lp_build_floor(&bld->base, tmp0);
1502 }
1503 break;
1504
1505 case TGSI_OPCODE_ROUND:
1506 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1507 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1508 dst0[chan_index] = lp_build_round(&bld->base, tmp0);
1509 }
1510 break;
1511
1512 case TGSI_OPCODE_EX2: {
1513 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1514 tmp0 = lp_build_exp2( &bld->base, tmp0);
1515 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1516 dst0[chan_index] = tmp0;
1517 }
1518 break;
1519 }
1520
1521 case TGSI_OPCODE_LG2:
1522 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1523 tmp0 = lp_build_log2( &bld->base, tmp0);
1524 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1525 dst0[chan_index] = tmp0;
1526 }
1527 break;
1528
1529 case TGSI_OPCODE_POW:
1530 src0 = emit_fetch( bld, inst, 0, CHAN_X );
1531 src1 = emit_fetch( bld, inst, 1, CHAN_X );
1532 res = lp_build_pow( &bld->base, src0, src1 );
1533 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1534 dst0[chan_index] = res;
1535 }
1536 break;
1537
1538 case TGSI_OPCODE_XPD:
1539 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1540 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
1541 tmp1 = emit_fetch( bld, inst, 1, CHAN_Z );
1542 tmp3 = emit_fetch( bld, inst, 0, CHAN_Z );
1543 }
1544 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
1545 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1546 tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
1547 tmp4 = emit_fetch( bld, inst, 1, CHAN_Y );
1548 }
1549 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1550 tmp2 = tmp0;
1551 tmp2 = lp_build_mul( &bld->base, tmp2, tmp1);
1552 tmp5 = tmp3;
1553 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1554 tmp2 = lp_build_sub( &bld->base, tmp2, tmp5);
1555 dst0[CHAN_X] = tmp2;
1556 }
1557 if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
1558 IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
1559 tmp2 = emit_fetch( bld, inst, 1, CHAN_X );
1560 tmp5 = emit_fetch( bld, inst, 0, CHAN_X );
1561 }
1562 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1563 tmp3 = lp_build_mul( &bld->base, tmp3, tmp2);
1564 tmp1 = lp_build_mul( &bld->base, tmp1, tmp5);
1565 tmp3 = lp_build_sub( &bld->base, tmp3, tmp1);
1566 dst0[CHAN_Y] = tmp3;
1567 }
1568 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1569 tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
1570 tmp0 = lp_build_mul( &bld->base, tmp0, tmp2);
1571 tmp5 = lp_build_sub( &bld->base, tmp5, tmp0);
1572 dst0[CHAN_Z] = tmp5;
1573 }
1574 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1575 dst0[CHAN_W] = bld->base.one;
1576 }
1577 break;
1578
1579 case TGSI_OPCODE_ABS:
1580 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1581 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1582 dst0[chan_index] = lp_build_abs( &bld->base, tmp0 );
1583 }
1584 break;
1585
1586 case TGSI_OPCODE_RCC:
1587 /* deprecated? */
1588 assert(0);
1589 return FALSE;
1590
1591 case TGSI_OPCODE_DPH:
1592 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1593 tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
1594 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
1595 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
1596 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
1597 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1598 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1599 tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
1600 tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
1601 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
1602 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1603 tmp1 = emit_fetch( bld, inst, 1, CHAN_W );
1604 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1605 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1606 dst0[chan_index] = tmp0;
1607 }
1608 break;
1609
1610 case TGSI_OPCODE_COS:
1611 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1612 tmp0 = lp_build_cos( &bld->base, tmp0 );
1613 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1614 dst0[chan_index] = tmp0;
1615 }
1616 break;
1617
1618 case TGSI_OPCODE_DDX:
1619 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1620 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, &dst0[chan_index], NULL);
1621 }
1622 break;
1623
1624 case TGSI_OPCODE_DDY:
1625 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1626 emit_fetch_deriv( bld, inst, 0, chan_index, NULL, NULL, &dst0[chan_index]);
1627 }
1628 break;
1629
1630 case TGSI_OPCODE_KILP:
1631 /* predicated kill */
1632 emit_kilp( bld, inst, (*pc)-1 );
1633 break;
1634
1635 case TGSI_OPCODE_KIL:
1636 /* conditional kill */
1637 emit_kil( bld, inst, (*pc)-1 );
1638 break;
1639
1640 case TGSI_OPCODE_PK2H:
1641 return FALSE;
1642 break;
1643
1644 case TGSI_OPCODE_PK2US:
1645 return FALSE;
1646 break;
1647
1648 case TGSI_OPCODE_PK4B:
1649 return FALSE;
1650 break;
1651
1652 case TGSI_OPCODE_PK4UB:
1653 return FALSE;
1654 break;
1655
1656 case TGSI_OPCODE_RFL:
1657 return FALSE;
1658 break;
1659
1660 case TGSI_OPCODE_SEQ:
1661 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1662 src0 = emit_fetch( bld, inst, 0, chan_index );
1663 src1 = emit_fetch( bld, inst, 1, chan_index );
1664 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_EQUAL, src0, src1 );
1665 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1666 }
1667 break;
1668
1669 case TGSI_OPCODE_SFL:
1670 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1671 dst0[chan_index] = bld->base.zero;
1672 }
1673 break;
1674
1675 case TGSI_OPCODE_SGT:
1676 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1677 src0 = emit_fetch( bld, inst, 0, chan_index );
1678 src1 = emit_fetch( bld, inst, 1, chan_index );
1679 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src0, src1 );
1680 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1681 }
1682 break;
1683
1684 case TGSI_OPCODE_SIN:
1685 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1686 tmp0 = lp_build_sin( &bld->base, tmp0 );
1687 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1688 dst0[chan_index] = tmp0;
1689 }
1690 break;
1691
1692 case TGSI_OPCODE_SLE:
1693 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1694 src0 = emit_fetch( bld, inst, 0, chan_index );
1695 src1 = emit_fetch( bld, inst, 1, chan_index );
1696 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LEQUAL, src0, src1 );
1697 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1698 }
1699 break;
1700
1701 case TGSI_OPCODE_SNE:
1702 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1703 src0 = emit_fetch( bld, inst, 0, chan_index );
1704 src1 = emit_fetch( bld, inst, 1, chan_index );
1705 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_NOTEQUAL, src0, src1 );
1706 dst0[chan_index] = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
1707 }
1708 break;
1709
1710 case TGSI_OPCODE_STR:
1711 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1712 dst0[chan_index] = bld->base.one;
1713 }
1714 break;
1715
1716 case TGSI_OPCODE_TEX:
1717 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_NONE, dst0 );
1718 break;
1719
1720 case TGSI_OPCODE_TXD:
1721 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV, dst0 );
1722 break;
1723
1724 case TGSI_OPCODE_UP2H:
1725 /* deprecated */
1726 assert (0);
1727 return FALSE;
1728 break;
1729
1730 case TGSI_OPCODE_UP2US:
1731 /* deprecated */
1732 assert(0);
1733 return FALSE;
1734 break;
1735
1736 case TGSI_OPCODE_UP4B:
1737 /* deprecated */
1738 assert(0);
1739 return FALSE;
1740 break;
1741
1742 case TGSI_OPCODE_UP4UB:
1743 /* deprecated */
1744 assert(0);
1745 return FALSE;
1746 break;
1747
1748 case TGSI_OPCODE_X2D:
1749 /* deprecated? */
1750 assert(0);
1751 return FALSE;
1752 break;
1753
1754 case TGSI_OPCODE_ARA:
1755 /* deprecated */
1756 assert(0);
1757 return FALSE;
1758 break;
1759
1760 case TGSI_OPCODE_ARR:
1761 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1762 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1763 tmp0 = lp_build_round(&bld->base, tmp0);
1764 dst0[chan_index] = tmp0;
1765 }
1766 break;
1767
1768 case TGSI_OPCODE_BRA:
1769 /* deprecated */
1770 assert(0);
1771 return FALSE;
1772 break;
1773
1774 case TGSI_OPCODE_CAL:
1775 lp_exec_mask_call(&bld->exec_mask,
1776 inst->Label.Label,
1777 pc);
1778
1779 break;
1780
1781 case TGSI_OPCODE_RET:
1782 lp_exec_mask_ret(&bld->exec_mask, pc);
1783 break;
1784
1785 case TGSI_OPCODE_END:
1786 *pc = -1;
1787 break;
1788
1789 case TGSI_OPCODE_SSG:
1790 /* TGSI_OPCODE_SGN */
1791 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1792 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1793 dst0[chan_index] = lp_build_sgn( &bld->base, tmp0 );
1794 }
1795 break;
1796
1797 case TGSI_OPCODE_CMP:
1798 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1799 src0 = emit_fetch( bld, inst, 0, chan_index );
1800 src1 = emit_fetch( bld, inst, 1, chan_index );
1801 src2 = emit_fetch( bld, inst, 2, chan_index );
1802 tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, bld->base.zero );
1803 dst0[chan_index] = lp_build_select( &bld->base, tmp0, src1, src2);
1804 }
1805 break;
1806
1807 case TGSI_OPCODE_SCS:
1808 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
1809 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1810 dst0[CHAN_X] = lp_build_cos( &bld->base, tmp0 );
1811 }
1812 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
1813 tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
1814 dst0[CHAN_Y] = lp_build_sin( &bld->base, tmp0 );
1815 }
1816 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
1817 dst0[CHAN_Z] = bld->base.zero;
1818 }
1819 IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
1820 dst0[CHAN_W] = bld->base.one;
1821 }
1822 break;
1823
1824 case TGSI_OPCODE_TXB:
1825 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_LOD_BIAS, dst0 );
1826 break;
1827
1828 case TGSI_OPCODE_NRM:
1829 /* fall-through */
1830 case TGSI_OPCODE_NRM4:
1831 /* 3 or 4-component normalization */
1832 {
1833 uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
1834
1835 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) ||
1836 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y) ||
1837 IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z) ||
1838 (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4)) {
1839
1840 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
1841
1842 /* xmm4 = src.x */
1843 /* xmm0 = src.x * src.x */
1844 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1845 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1846 tmp4 = tmp0;
1847 }
1848 tmp0 = lp_build_mul( &bld->base, tmp0, tmp0);
1849
1850 /* xmm5 = src.y */
1851 /* xmm0 = xmm0 + src.y * src.y */
1852 tmp1 = emit_fetch(bld, inst, 0, CHAN_Y);
1853 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1854 tmp5 = tmp1;
1855 }
1856 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1857 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1858
1859 /* xmm6 = src.z */
1860 /* xmm0 = xmm0 + src.z * src.z */
1861 tmp1 = emit_fetch(bld, inst, 0, CHAN_Z);
1862 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1863 tmp6 = tmp1;
1864 }
1865 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1866 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1867
1868 if (dims == 4) {
1869 /* xmm7 = src.w */
1870 /* xmm0 = xmm0 + src.w * src.w */
1871 tmp1 = emit_fetch(bld, inst, 0, CHAN_W);
1872 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W)) {
1873 tmp7 = tmp1;
1874 }
1875 tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
1876 tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
1877 }
1878
1879 /* xmm1 = 1 / sqrt(xmm0) */
1880 tmp1 = lp_build_rsqrt( &bld->base, tmp0);
1881
1882 /* dst.x = xmm1 * src.x */
1883 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
1884 dst0[CHAN_X] = lp_build_mul( &bld->base, tmp4, tmp1);
1885 }
1886
1887 /* dst.y = xmm1 * src.y */
1888 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
1889 dst0[CHAN_Y] = lp_build_mul( &bld->base, tmp5, tmp1);
1890 }
1891
1892 /* dst.z = xmm1 * src.z */
1893 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
1894 dst0[CHAN_Z] = lp_build_mul( &bld->base, tmp6, tmp1);
1895 }
1896
1897 /* dst.w = xmm1 * src.w */
1898 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) && dims == 4) {
1899 dst0[CHAN_W] = lp_build_mul( &bld->base, tmp7, tmp1);
1900 }
1901 }
1902
1903 /* dst.w = 1.0 */
1904 if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 3) {
1905 dst0[CHAN_W] = bld->base.one;
1906 }
1907 }
1908 break;
1909
1910 case TGSI_OPCODE_DIV:
1911 /* deprecated */
1912 assert( 0 );
1913 return FALSE;
1914 break;
1915
1916 case TGSI_OPCODE_DP2:
1917 tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
1918 tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
1919 tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
1920 tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
1921 tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
1922 tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
1923 tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
1924 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1925 dst0[chan_index] = tmp0; /* dest[ch] = xmm0 */
1926 }
1927 break;
1928
1929 case TGSI_OPCODE_TXL:
1930 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD, dst0 );
1931 break;
1932
1933 case TGSI_OPCODE_TXP:
1934 emit_tex( bld, inst, LP_BLD_TEX_MODIFIER_PROJECTED, dst0 );
1935 break;
1936
1937 case TGSI_OPCODE_BRK:
1938 lp_exec_break(&bld->exec_mask);
1939 break;
1940
1941 case TGSI_OPCODE_IF:
1942 tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
1943 tmp0 = lp_build_cmp(&bld->base, PIPE_FUNC_NOTEQUAL,
1944 tmp0, bld->base.zero);
1945 lp_exec_mask_cond_push(&bld->exec_mask, tmp0);
1946 break;
1947
1948 case TGSI_OPCODE_BGNLOOP:
1949 lp_exec_bgnloop(&bld->exec_mask);
1950 break;
1951
1952 case TGSI_OPCODE_BGNSUB:
1953 lp_exec_mask_bgnsub(&bld->exec_mask);
1954 break;
1955
1956 case TGSI_OPCODE_ELSE:
1957 lp_exec_mask_cond_invert(&bld->exec_mask);
1958 break;
1959
1960 case TGSI_OPCODE_ENDIF:
1961 lp_exec_mask_cond_pop(&bld->exec_mask);
1962 break;
1963
1964 case TGSI_OPCODE_ENDLOOP:
1965 lp_exec_endloop(&bld->exec_mask);
1966 break;
1967
1968 case TGSI_OPCODE_ENDSUB:
1969 lp_exec_mask_endsub(&bld->exec_mask, pc);
1970 break;
1971
1972 case TGSI_OPCODE_PUSHA:
1973 /* deprecated? */
1974 assert(0);
1975 return FALSE;
1976 break;
1977
1978 case TGSI_OPCODE_POPA:
1979 /* deprecated? */
1980 assert(0);
1981 return FALSE;
1982 break;
1983
1984 case TGSI_OPCODE_CEIL:
1985 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1986 tmp0 = emit_fetch( bld, inst, 0, chan_index );
1987 dst0[chan_index] = lp_build_ceil(&bld->base, tmp0);
1988 }
1989 break;
1990
1991 case TGSI_OPCODE_I2F:
1992 /* deprecated? */
1993 assert(0);
1994 return FALSE;
1995 break;
1996
1997 case TGSI_OPCODE_NOT:
1998 /* deprecated? */
1999 assert(0);
2000 return FALSE;
2001 break;
2002
2003 case TGSI_OPCODE_TRUNC:
2004 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2005 tmp0 = emit_fetch( bld, inst, 0, chan_index );
2006 dst0[chan_index] = lp_build_trunc(&bld->base, tmp0);
2007 }
2008 break;
2009
2010 case TGSI_OPCODE_SHL:
2011 /* deprecated? */
2012 assert(0);
2013 return FALSE;
2014 break;
2015
2016 case TGSI_OPCODE_ISHR:
2017 /* deprecated? */
2018 assert(0);
2019 return FALSE;
2020 break;
2021
2022 case TGSI_OPCODE_AND:
2023 /* deprecated? */
2024 assert(0);
2025 return FALSE;
2026 break;
2027
2028 case TGSI_OPCODE_OR:
2029 /* deprecated? */
2030 assert(0);
2031 return FALSE;
2032 break;
2033
2034 case TGSI_OPCODE_MOD:
2035 /* deprecated? */
2036 assert(0);
2037 return FALSE;
2038 break;
2039
2040 case TGSI_OPCODE_XOR:
2041 /* deprecated? */
2042 assert(0);
2043 return FALSE;
2044 break;
2045
2046 case TGSI_OPCODE_SAD:
2047 /* deprecated? */
2048 assert(0);
2049 return FALSE;
2050 break;
2051
2052 case TGSI_OPCODE_TXF:
2053 /* deprecated? */
2054 assert(0);
2055 return FALSE;
2056 break;
2057
2058 case TGSI_OPCODE_TXQ:
2059 /* deprecated? */
2060 assert(0);
2061 return FALSE;
2062 break;
2063
2064 case TGSI_OPCODE_CONT:
2065 lp_exec_continue(&bld->exec_mask);
2066 break;
2067
2068 case TGSI_OPCODE_EMIT:
2069 return FALSE;
2070 break;
2071
2072 case TGSI_OPCODE_ENDPRIM:
2073 return FALSE;
2074 break;
2075
2076 case TGSI_OPCODE_NOP:
2077 break;
2078
2079 default:
2080 return FALSE;
2081 }
2082
2083 if(info->num_dst) {
2084 LLVMValueRef pred[NUM_CHANNELS];
2085
2086 emit_fetch_predicate( bld, inst, pred );
2087
2088 FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
2089 emit_store( bld, inst, 0, chan_index, pred[chan_index], dst0[chan_index]);
2090 }
2091 }
2092
2093 return TRUE;
2094 }
2095
2096
/**
 * Translate a TGSI token stream into LLVM IR using SoA
 * (structure-of-arrays) vector layout.
 *
 * The translation runs in two phases:
 *   1. Parse pass: walk the token stream, emitting declarations and
 *      immediates directly, while buffering every instruction into
 *      bld.instructions (grown with REALLOC as needed).
 *   2. Execution pass: walk the buffered instructions via the `pc`
 *      index until pc == -1; buffering the whole program first is what
 *      allows non-linear control flow (pc can jump backwards/forwards).
 *
 * \param builder    LLVM IR builder to emit into
 * \param tokens     parsed TGSI program
 * \param type       vector type for SoA computation (length checked
 *                   against LP_MAX_VECTOR_LENGTH)
 * \param mask       execution mask context (may control conditional exec)
 * \param consts_ptr pointer to the constant buffer
 * \param pos        fragment position values
 * \param inputs     interpolated input channels (read-only)
 * \param outputs    output channels to be written
 * \param sampler    texture sampling code generator
 * \param info       shader info (provides indirect_files)
 */
2097 void
2098 lp_build_tgsi_soa(LLVMBuilderRef builder,
2099                   const struct tgsi_token *tokens,
2100                   struct lp_type type,
2101                   struct lp_build_mask_context *mask,
2102                   LLVMValueRef consts_ptr,
2103                   const LLVMValueRef *pos,
2104                   const LLVMValueRef (*inputs)[NUM_CHANNELS],
2105                   LLVMValueRef (*outputs)[NUM_CHANNELS],
2106                   struct lp_build_sampler_soa *sampler,
2107                   const struct tgsi_shader_info *info)
2108 {
2109    struct lp_build_tgsi_soa_context bld;
2110    struct tgsi_parse_context parse;
2111    uint num_immediates = 0;
2112    uint num_instructions = 0;
2113    unsigned i;
2114    int pc = 0;
2115
2116    struct lp_type res_type;
2117
2118    assert(type.length <= LP_MAX_VECTOR_LENGTH);
      /* Signed integer type matching the computation type's width/length.
       * NOTE(review): res_type is not referenced anywhere in this function
       * body as visible here — possibly set up for a later change; confirm
       * before removing. */
2119    memset(&res_type, 0, sizeof res_type);
2120    res_type.width = type.width;
2121    res_type.length = type.length;
2122    res_type.sign = 1;
2123
2124    /* Setup build context */
2125    memset(&bld, 0, sizeof bld);
2126    lp_build_context_init(&bld.base, builder, type);
2127    lp_build_context_init(&bld.uint_bld, builder, lp_uint_type(type));
2128    bld.mask = mask;
2129    bld.pos = pos;
2130    bld.inputs = inputs;
2131    bld.outputs = outputs;
2132    bld.consts_ptr = consts_ptr;
2133    bld.sampler = sampler;
2134    bld.info = info;
2135    bld.indirect_files = info->indirect_files;
      /* Instruction buffer starts at LP_MAX_INSTRUCTIONS entries and grows
       * in LP_MAX_INSTRUCTIONS-sized increments below. */
2136    bld.instructions = (struct tgsi_full_instruction *)
2137                       MALLOC( LP_MAX_INSTRUCTIONS * sizeof(struct tgsi_full_instruction) );
2138    bld.max_instructions = LP_MAX_INSTRUCTIONS;
2139
2140    if (!bld.instructions) {
         /* Out of memory: silently emit nothing for this shader. */
2141       return;
2142    }
2143
2144    lp_exec_mask_init(&bld.exec_mask, &bld.base);
2145
2146    tgsi_parse_init( &parse, tokens );
2147
      /* Phase 1: parse pass — handle declarations/immediates immediately,
       * buffer instructions for the execution pass below. */
2148    while( !tgsi_parse_end_of_tokens( &parse ) ) {
2149       tgsi_parse_token( &parse );
2150
2151       switch( parse.FullToken.Token.Type ) {
2152       case TGSI_TOKEN_TYPE_DECLARATION:
2153          /* Inputs already interpolated */
2154          emit_declaration( &bld, &parse.FullToken.FullDeclaration );
2155          break;
2156
2157       case TGSI_TOKEN_TYPE_INSTRUCTION:
2158          {
2159             /* save expanded instruction */
2160             if (num_instructions == bld.max_instructions) {
2161                struct tgsi_full_instruction *instructions;
2162                instructions = REALLOC(bld.instructions,
2163                                       bld.max_instructions
2164                                       * sizeof(struct tgsi_full_instruction),
2165                                       (bld.max_instructions + LP_MAX_INSTRUCTIONS)
2166                                       * sizeof(struct tgsi_full_instruction));
2167                if (!instructions) {
                     /* NOTE(review): this break exits the switch before the
                      * memcpy below, so on REALLOC failure the current
                      * instruction is silently dropped (the old buffer is
                      * still valid and freed at the end). */
2168                   break;
2169                }
2170                bld.instructions = instructions;
2171                bld.max_instructions += LP_MAX_INSTRUCTIONS;
2172             }
2173
2174             memcpy(bld.instructions + num_instructions,
2175                    &parse.FullToken.FullInstruction,
2176                    sizeof(bld.instructions[0]));
2177
2178             num_instructions++;
2179          }
2180
2181          break;
2182
2183       case TGSI_TOKEN_TYPE_IMMEDIATE:
2184          /* simply copy the immediate values into the next immediates[] slot */
2185          {
2186             const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
2187             assert(size <= 4);
2188             assert(num_immediates < LP_MAX_TGSI_IMMEDIATES);
               /* Each scalar immediate is splatted into a full SoA vector;
                * unused channels are left undefined. */
2189             for( i = 0; i < size; ++i )
2190                bld.immediates[num_immediates][i] =
2191                   lp_build_const_vec(type, parse.FullToken.FullImmediate.u[i].Float);
2192             for( i = size; i < 4; ++i )
2193                bld.immediates[num_immediates][i] = bld.base.undef;
2194             num_immediates++;
2195          }
2196          break;
2197
2198       case TGSI_TOKEN_TYPE_PROPERTY:
            /* Properties are ignored by the SoA translator. */
2199          break;
2200
2201       default:
2202          assert( 0 );
2203       }
2204    }
2205
      /* Phase 2: execution pass — emit_instruction presumably advances or
       * redirects pc (enabling loops/branches); pc == -1 terminates. */
2206    while (pc != -1) {
2207       struct tgsi_full_instruction *instr = bld.instructions + pc;
2208       const struct tgsi_opcode_info *opcode_info =
2209          tgsi_get_opcode_info(instr->Instruction.Opcode);
2210       if (!emit_instruction( &bld, instr, opcode_info, &pc ))
            /* Untranslatable opcodes are skipped with a warning rather than
             * aborting the whole shader. */
2211          _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
2212                        opcode_info->mnemonic);
2213    }
2214
      /* Disabled debug dump of the TGSI source and generated function. */
2215    if (0) {
2216       LLVMBasicBlockRef block = LLVMGetInsertBlock(builder);
2217       LLVMValueRef function = LLVMGetBasicBlockParent(block);
2218       debug_printf("11111111111111111111111111111 \n");
2219       tgsi_dump(tokens, 0);
2220       lp_debug_dump_value(function);
2221       debug_printf("2222222222222222222222222222 \n");
2222    }
2223    tgsi_parse_free( &parse );
2224
      /* Disabled debug dump of the whole containing LLVM module. */
2225    if (0) {
2226       LLVMModuleRef module = LLVMGetGlobalParent(
2227          LLVMGetBasicBlockParent(LLVMGetInsertBlock(bld.base.builder)));
2228       LLVMDumpModule(module);
2229
2230    }
2231
2232    FREE( bld.instructions );
2233 }
2234