glsl_to_tgsi: remove handling of XPD opcode in compile_tgsi_instruction()
[mesa.git] / src / mesa / state_tracker / st_glsl_to_tgsi.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27 /**
28 * \file glsl_to_tgsi.cpp
29 *
30 * Translate GLSL IR to TGSI.
31 */
32
33 #include <stdio.h>
34 #include "main/compiler.h"
35 #include "ir.h"
36 #include "ir_visitor.h"
37 #include "ir_print_visitor.h"
38 #include "ir_expression_flattening.h"
39 #include "glsl_types.h"
40 #include "glsl_parser_extras.h"
41 #include "../glsl/program.h"
42 #include "ir_optimization.h"
43 #include "ast.h"
44
45 extern "C" {
46 #include "main/mtypes.h"
47 #include "main/shaderapi.h"
48 #include "main/shaderobj.h"
49 #include "main/uniforms.h"
50 #include "program/hash_table.h"
51 #include "program/prog_instruction.h"
52 #include "program/prog_optimize.h"
53 #include "program/prog_print.h"
54 #include "program/program.h"
55 #include "program/prog_uniform.h"
56 #include "program/prog_parameter.h"
57 #include "program/sampler.h"
58
59 #include "pipe/p_compiler.h"
60 #include "pipe/p_context.h"
61 #include "pipe/p_screen.h"
62 #include "pipe/p_shader_tokens.h"
63 #include "pipe/p_state.h"
64 #include "util/u_math.h"
65 #include "tgsi/tgsi_ureg.h"
66 #include "tgsi/tgsi_info.h"
67 #include "st_context.h"
68 #include "st_program.h"
69 #include "st_glsl_to_tgsi.h"
70 #include "st_mesa_to_tgsi.h"
71 }
72
73 #define PROGRAM_ANY_CONST ((1 << PROGRAM_LOCAL_PARAM) | \
74 (1 << PROGRAM_ENV_PARAM) | \
75 (1 << PROGRAM_STATE_VAR) | \
76 (1 << PROGRAM_NAMED_PARAM) | \
77 (1 << PROGRAM_CONSTANT) | \
78 (1 << PROGRAM_UNIFORM))
79
80 class st_src_reg;
81 class st_dst_reg;
82
83 static int swizzle_for_size(int size);
84
85 /**
86 * This class corresponds to the TGSI ureg_src struct.
87 */
88 class st_src_reg {
89 public:
90 st_src_reg(gl_register_file file, int index, const glsl_type *type)
91 {
92 this->file = file;
93 this->index = index;
94 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
95 this->swizzle = swizzle_for_size(type->vector_elements);
96 else
97 this->swizzle = SWIZZLE_XYZW;
98 this->negate = 0;
99 this->type = type ? type->base_type : GLSL_TYPE_ERROR;
100 this->reladdr = NULL;
101 }
102
103 st_src_reg(gl_register_file file, int index, int type)
104 {
105 this->type = type;
106 this->file = file;
107 this->index = index;
108 this->swizzle = SWIZZLE_XYZW;
109 this->negate = 0;
110 this->reladdr = NULL;
111 }
112
113 st_src_reg()
114 {
115 this->type = GLSL_TYPE_ERROR;
116 this->file = PROGRAM_UNDEFINED;
117 this->index = 0;
118 this->swizzle = 0;
119 this->negate = 0;
120 this->reladdr = NULL;
121 }
122
123 explicit st_src_reg(st_dst_reg reg);
124
125 gl_register_file file; /**< PROGRAM_* from Mesa */
126 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
127 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
128 int negate; /**< NEGATE_XYZW mask from mesa */
129 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
130 /** Register index should be offset by the integer in this reg. */
131 st_src_reg *reladdr;
132 };
133
134 class st_dst_reg {
135 public:
136 st_dst_reg(gl_register_file file, int writemask, int type)
137 {
138 this->file = file;
139 this->index = 0;
140 this->writemask = writemask;
141 this->cond_mask = COND_TR;
142 this->reladdr = NULL;
143 this->type = type;
144 }
145
146 st_dst_reg()
147 {
148 this->type = GLSL_TYPE_ERROR;
149 this->file = PROGRAM_UNDEFINED;
150 this->index = 0;
151 this->writemask = 0;
152 this->cond_mask = COND_TR;
153 this->reladdr = NULL;
154 }
155
156 explicit st_dst_reg(st_src_reg reg);
157
158 gl_register_file file; /**< PROGRAM_* from Mesa */
159 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
160 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
161 GLuint cond_mask:4;
162 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
163 /** Register index should be offset by the integer in this reg. */
164 st_src_reg *reladdr;
165 };
166
167 st_src_reg::st_src_reg(st_dst_reg reg)
168 {
169 this->type = reg.type;
170 this->file = reg.file;
171 this->index = reg.index;
172 this->swizzle = SWIZZLE_XYZW;
173 this->negate = 0;
174 this->reladdr = NULL;
175 }
176
177 st_dst_reg::st_dst_reg(st_src_reg reg)
178 {
179 this->type = reg.type;
180 this->file = reg.file;
181 this->index = reg.index;
182 this->writemask = WRITEMASK_XYZW;
183 this->cond_mask = COND_TR;
184 this->reladdr = reg.reladdr;
185 }
186
187 class glsl_to_tgsi_instruction : public exec_node {
188 public:
189 /* Callers of this ralloc-based new need not call delete. It's
190 * easier to just ralloc_free 'ctx' (or any of its ancestors). */
191 static void* operator new(size_t size, void *ctx)
192 {
193 void *node;
194
195 node = rzalloc_size(ctx, size);
196 assert(node != NULL);
197
198 return node;
199 }
200
201 unsigned op;
202 st_dst_reg dst;
203 st_src_reg src[3];
204 /** Pointer to the ir source this tree came from for debugging */
205 ir_instruction *ir;
206 GLboolean cond_update;
207 bool saturate;
208 int sampler; /**< sampler index */
209 int tex_target; /**< One of TEXTURE_*_INDEX */
210 GLboolean tex_shadow;
211
212 class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */
213 };
214
215 class variable_storage : public exec_node {
216 public:
217 variable_storage(ir_variable *var, gl_register_file file, int index)
218 : file(file), index(index), var(var)
219 {
220 /* empty */
221 }
222
223 gl_register_file file;
224 int index;
225 ir_variable *var; /* variable that maps to this, if any */
226 };
227
228 class function_entry : public exec_node {
229 public:
230 ir_function_signature *sig;
231
232 /**
233 * identifier of this function signature used by the program.
234 *
235 * At the point that Mesa instructions for function calls are
236 * generated, we don't know the address of the first instruction of
237 * the function body. So we use a small integer as the call's
238 * BranchTarget and rewrite it during set_branchtargets().
239 */
240 int sig_id;
241
242 /**
243 * Pointer to first instruction of the function body.
244 *
245 * Set during function body emits after main() is processed.
246 */
247 glsl_to_tgsi_instruction *bgn_inst;
248
249 /**
250 * Index of the first instruction of the function body in actual
251 * Mesa IR.
252 *
253 * Set after conversion from glsl_to_tgsi_instruction to prog_instruction.
254 */
255 int inst;
256
257 /** Storage for the return value. */
258 st_src_reg return_reg;
259 };
260
261 class glsl_to_tgsi_visitor : public ir_visitor {
262 public:
263 glsl_to_tgsi_visitor();
264 ~glsl_to_tgsi_visitor();
265
266 function_entry *current_function;
267
268 struct gl_context *ctx;
269 struct gl_program *prog;
270 struct gl_shader_program *shader_program;
271 struct gl_shader_compiler_options *options;
272
273 int next_temp;
274
275 int num_address_regs;
276 int samplers_used;
277 bool indirect_addr_temps;
278 bool indirect_addr_consts;
279
280 int glsl_version;
281
282 variable_storage *find_variable_storage(ir_variable *var);
283
284 function_entry *get_function_signature(ir_function_signature *sig);
285
286 st_src_reg get_temp(const glsl_type *type);
287 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);
288
289 st_src_reg st_src_reg_for_float(float val);
290 st_src_reg st_src_reg_for_int(int val);
291 st_src_reg st_src_reg_for_type(int type, int val);
292
293 /**
294 * \name Visit methods
295 *
296 * As typical for the visitor pattern, there must be one \c visit method for
297 * each concrete subclass of \c ir_instruction. Virtual base classes within
298 * the hierarchy should not have \c visit methods.
299 */
300 /*@{*/
301 virtual void visit(ir_variable *);
302 virtual void visit(ir_loop *);
303 virtual void visit(ir_loop_jump *);
304 virtual void visit(ir_function_signature *);
305 virtual void visit(ir_function *);
306 virtual void visit(ir_expression *);
307 virtual void visit(ir_swizzle *);
308 virtual void visit(ir_dereference_variable *);
309 virtual void visit(ir_dereference_array *);
310 virtual void visit(ir_dereference_record *);
311 virtual void visit(ir_assignment *);
312 virtual void visit(ir_constant *);
313 virtual void visit(ir_call *);
314 virtual void visit(ir_return *);
315 virtual void visit(ir_discard *);
316 virtual void visit(ir_texture *);
317 virtual void visit(ir_if *);
318 /*@}*/
319
320 st_src_reg result;
321
322 /** List of variable_storage */
323 exec_list variables;
324
325 /** List of function_entry */
326 exec_list function_signatures;
327 int next_signature_id;
328
329 /** List of glsl_to_tgsi_instruction */
330 exec_list instructions;
331
332 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op);
333
334 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
335 st_dst_reg dst, st_src_reg src0);
336
337 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
338 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
339
340 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
341 st_dst_reg dst,
342 st_src_reg src0, st_src_reg src1, st_src_reg src2);
343
344 unsigned get_opcode(ir_instruction *ir, unsigned op,
345 st_dst_reg dst,
346 st_src_reg src0, st_src_reg src1);
347
348 /**
349 * Emit the correct dot-product instruction for the type of arguments
350 */
351 void emit_dp(ir_instruction *ir,
352 st_dst_reg dst,
353 st_src_reg src0,
354 st_src_reg src1,
355 unsigned elements);
356
357 void emit_scalar(ir_instruction *ir, unsigned op,
358 st_dst_reg dst, st_src_reg src0);
359
360 void emit_scalar(ir_instruction *ir, unsigned op,
361 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
362
363 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);
364
365 void emit_scs(ir_instruction *ir, unsigned op,
366 st_dst_reg dst, const st_src_reg &src);
367
368 GLboolean try_emit_mad(ir_expression *ir,
369 int mul_operand);
370 GLboolean try_emit_sat(ir_expression *ir);
371
372 void emit_swz(ir_expression *ir);
373
374 bool process_move_condition(ir_rvalue *ir);
375
376 void remove_output_reads(gl_register_file type);
377 void simplify_cmp(void);
378
379 void rename_temp_register(int index, int new_index);
380 int get_first_temp_read(int index);
381 int get_first_temp_write(int index);
382 int get_last_temp_read(int index);
383 int get_last_temp_write(int index);
384
385 void copy_propagate(void);
386 void eliminate_dead_code(void);
387 void merge_registers(void);
388 void renumber_registers(void);
389
390 void *mem_ctx;
391 };
392
393 static st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
394
395 static st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
396
397 static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT);
398
399 static void
400 fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);
401
402 static void
403 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
404 {
405 va_list args;
406 va_start(args, fmt);
407 ralloc_vasprintf_append(&prog->InfoLog, fmt, args);
408 va_end(args);
409
410 prog->LinkStatus = GL_FALSE;
411 }
412
413 static int
414 swizzle_for_size(int size)
415 {
416 int size_swizzles[4] = {
417 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
418 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
419 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
420 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
421 };
422
423 assert((size >= 1) && (size <= 4));
424 return size_swizzles[size - 1];
425 }
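/* As a rough illustration of the table above: constructing a source register
 * from a two-component type picks the SWIZZLE_XYYY entry, so reads replicate
 * the last channel into the unused ones:
 *
 *    st_src_reg src(PROGRAM_TEMPORARY, 0, glsl_type::vec2_type);
 *    assert(src.swizzle == MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
 *                                        SWIZZLE_Y, SWIZZLE_Y));
 */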
426
427 static bool
428 is_tex_instruction(unsigned opcode)
429 {
430 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
431 return info->is_tex;
432 }
433
434 static unsigned
435 num_inst_dst_regs(unsigned opcode)
436 {
437 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
438 return info->num_dst;
439 }
440
441 static unsigned
442 num_inst_src_regs(unsigned opcode)
443 {
444 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
445 return info->is_tex ? info->num_src - 1 : info->num_src;
446 }
447
448 glsl_to_tgsi_instruction *
449 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
450 st_dst_reg dst,
451 st_src_reg src0, st_src_reg src1, st_src_reg src2)
452 {
453 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
454 int num_reladdr = 0, i;
455
456 op = get_opcode(ir, op, dst, src0, src1);
457
458 /* If we have to do relative addressing, we want to load the ARL
459 * reg directly for one of the regs, and preload the other reladdr
460 * sources into temps.
461 */
462 num_reladdr += dst.reladdr != NULL;
463 num_reladdr += src0.reladdr != NULL;
464 num_reladdr += src1.reladdr != NULL;
465 num_reladdr += src2.reladdr != NULL;
466
467 reladdr_to_temp(ir, &src2, &num_reladdr);
468 reladdr_to_temp(ir, &src1, &num_reladdr);
469 reladdr_to_temp(ir, &src0, &num_reladdr);
470
471 if (dst.reladdr) {
472 emit_arl(ir, address_reg, *dst.reladdr);
473 num_reladdr--;
474 }
475 assert(num_reladdr == 0);
476
477 inst->op = op;
478 inst->dst = dst;
479 inst->src[0] = src0;
480 inst->src[1] = src1;
481 inst->src[2] = src2;
482 inst->ir = ir;
483
484 inst->function = NULL;
485
486 if (op == TGSI_OPCODE_ARL)
487 this->num_address_regs = 1;
488
489 /* Update indirect addressing status used by TGSI */
490 if (dst.reladdr) {
491 switch(dst.file) {
492 case PROGRAM_TEMPORARY:
493 this->indirect_addr_temps = true;
494 break;
495 case PROGRAM_LOCAL_PARAM:
496 case PROGRAM_ENV_PARAM:
497 case PROGRAM_STATE_VAR:
498 case PROGRAM_NAMED_PARAM:
499 case PROGRAM_CONSTANT:
500 case PROGRAM_UNIFORM:
501 this->indirect_addr_consts = true;
502 break;
503 default:
504 break;
505 }
506 }
507 else {
508 for (i=0; i<3; i++) {
509 if(inst->src[i].reladdr) {
510 switch(inst->src[i].file) {
511 case PROGRAM_TEMPORARY:
512 this->indirect_addr_temps = true;
513 break;
514 case PROGRAM_LOCAL_PARAM:
515 case PROGRAM_ENV_PARAM:
516 case PROGRAM_STATE_VAR:
517 case PROGRAM_NAMED_PARAM:
518 case PROGRAM_CONSTANT:
519 case PROGRAM_UNIFORM:
520 this->indirect_addr_consts = true;
521 break;
522 default:
523 break;
524 }
525 }
526 }
527 }
528
529 this->instructions.push_tail(inst);
530
531 return inst;
532 }
533
534
535 glsl_to_tgsi_instruction *
536 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
537 st_dst_reg dst, st_src_reg src0, st_src_reg src1)
538 {
539 return emit(ir, op, dst, src0, src1, undef_src);
540 }
541
542 glsl_to_tgsi_instruction *
543 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
544 st_dst_reg dst, st_src_reg src0)
545 {
546 assert(dst.writemask != 0);
547 return emit(ir, op, dst, src0, undef_src, undef_src);
548 }
549
550 glsl_to_tgsi_instruction *
551 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op)
552 {
553 return emit(ir, op, undef_dst, undef_src, undef_src, undef_src);
554 }
555
556 /**
557 * Determines whether to use an integer, unsigned integer, or float opcode
558 * based on the operands and input opcode, then emits the result.
559 *
560 * TODO: type checking for remaining TGSI opcodes
561 */
562 unsigned
563 glsl_to_tgsi_visitor::get_opcode(ir_instruction *ir, unsigned op,
564 st_dst_reg dst,
565 st_src_reg src0, st_src_reg src1)
566 {
567 int type = GLSL_TYPE_FLOAT;
568
569 if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
570 type = GLSL_TYPE_FLOAT;
571 else if (glsl_version >= 130)
572 type = src0.type;
573
574 #define case4(c, f, i, u) \
575 case TGSI_OPCODE_##c: \
576 if (type == GLSL_TYPE_INT) op = TGSI_OPCODE_##i; \
577 else if (type == GLSL_TYPE_UINT) op = TGSI_OPCODE_##u; \
578 else op = TGSI_OPCODE_##f; \
579 break;
580 #define case3(f, i, u) case4(f, f, i, u)
581 #define case2fi(f, i) case4(f, f, i, i)
582 #define case2iu(i, u) case4(i, LAST, i, u)
583
584 switch(op) {
585 case2fi(ADD, UADD);
586 case2fi(MUL, UMUL);
587 case2fi(MAD, UMAD);
588 case3(DIV, IDIV, UDIV);
589 case3(MAX, IMAX, UMAX);
590 case3(MIN, IMIN, UMIN);
591 case2iu(MOD, UMOD);
592
593 case2fi(SEQ, USEQ);
594 case2fi(SNE, USNE);
595 case3(SGE, ISGE, USGE);
596 case3(SLT, ISLT, USLT);
597
598 case2iu(SHL, SHL);
599 case2iu(ISHR, USHR);
600 case2iu(NOT, NOT);
601 case2iu(AND, AND);
602 case2iu(OR, OR);
603 case2iu(XOR, XOR);
604
605 default: break;
606 }
607
608 assert(op != TGSI_OPCODE_LAST);
609 return op;
610 }
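/* Rough sketch of how the case tables above behave inside the visitor,
 * assuming glsl_version >= 130 so the source type is honored: two
 * signed-integer sources turn ADD into UADD (case2fi) and SGE into ISGE
 * (case3), while float sources leave the opcode unchanged:
 *
 *    st_src_reg a(PROGRAM_TEMPORARY, 0, GLSL_TYPE_INT);
 *    st_src_reg b(PROGRAM_TEMPORARY, 1, GLSL_TYPE_INT);
 *    unsigned op = get_opcode(NULL, TGSI_OPCODE_ADD, undef_dst, a, b);
 *    // op == TGSI_OPCODE_UADD
 */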
611
612 void
613 glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
614 st_dst_reg dst, st_src_reg src0, st_src_reg src1,
615 unsigned elements)
616 {
617 static const unsigned dot_opcodes[] = {
618 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
619 };
620
621 emit(ir, dot_opcodes[elements - 2], dst, src0, src1);
622 }
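/* For example, ir_binop_dot on two vec3 operands reaches this as
 *
 *    emit_dp(ir, result_dst, op[0], op[1], 3);
 *
 * which selects dot_opcodes[1], i.e. TGSI_OPCODE_DP3.
 */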
623
624 /**
625 * Emits TGSI scalar opcodes to produce unique answers across channels.
626 *
627 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X
628 * channel determines the result across all channels. So to do a vec4
629 * of this operation, we want to emit a scalar per source channel used
630 * to produce dest channels.
631 */
632 void
633 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
634 st_dst_reg dst,
635 st_src_reg orig_src0, st_src_reg orig_src1)
636 {
637 int i, j;
638 int done_mask = ~dst.writemask;
639
640 /* TGSI RCP is a scalar operation splatting results to all channels,
641 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
642 * dst channels.
643 */
644 for (i = 0; i < 4; i++) {
645 GLuint this_mask = (1 << i);
646 glsl_to_tgsi_instruction *inst;
647 st_src_reg src0 = orig_src0;
648 st_src_reg src1 = orig_src1;
649
650 if (done_mask & this_mask)
651 continue;
652
653 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
654 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
655 for (j = i + 1; j < 4; j++) {
656 /* If there is another enabled component in the destination that is
657 * derived from the same inputs, generate its value on this pass as
658 * well.
659 */
660 if (!(done_mask & (1 << j)) &&
661 GET_SWZ(src0.swizzle, j) == src0_swiz &&
662 GET_SWZ(src1.swizzle, j) == src1_swiz) {
663 this_mask |= (1 << j);
664 }
665 }
666 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
667 src0_swiz, src0_swiz);
668 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
669 src1_swiz, src1_swiz);
670
671 inst = emit(ir, op, dst, src0, src1);
672 inst->dst.writemask = this_mask;
673 done_mask |= this_mask;
674 }
675 }
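/* Sketch of the expansion: RCP into dst.xy from a source swizzled .yyyy needs
 * a single instruction, because both enabled channels derive from the same
 * source component:
 *
 *    RCP dst.xy, src.yyyy
 *
 * whereas a source swizzled .xyxy emits two RCPs, one writing dst.x from
 * src.xxxx and one writing dst.y from src.yyyy.
 */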
676
677 void
678 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
679 st_dst_reg dst, st_src_reg src0)
680 {
681 st_src_reg undef = undef_src;
682
683 undef.swizzle = SWIZZLE_XXXX;
684
685 emit_scalar(ir, op, dst, src0, undef);
686 }
687
688 void
689 glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
690 st_dst_reg dst, st_src_reg src0)
691 {
692 st_src_reg tmp = get_temp(glsl_type::float_type);
693
694 if (src0.type == GLSL_TYPE_INT)
695 emit(ir, TGSI_OPCODE_I2F, st_dst_reg(tmp), src0);
696 else if (src0.type == GLSL_TYPE_UINT)
697 emit(ir, TGSI_OPCODE_U2F, st_dst_reg(tmp), src0);
698 else
699 tmp = src0;
700
701 emit(ir, TGSI_OPCODE_ARL, dst, tmp);
702 }
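/* Sketch: with an integer index source, the sequence emitted above is roughly
 *
 *    I2F TEMP[t].x, index        (U2F for unsigned sources)
 *    ARL ADDR[0].x, TEMP[t].x
 *
 * while float-typed sources (the GLSL <= 1.20 path) skip the conversion and
 * feed ARL directly.
 */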
703
704 /**
705 * Emit a TGSI_OPCODE_SCS instruction
706 *
707 * The \c SCS opcode functions a bit differently than the other TGSI opcodes.
708 * Instead of splatting its result across all four components of the
709 * destination, it writes one value to the \c x component and another value to
710 * the \c y component.
711 *
712 * \param ir IR instruction being processed
713 * \param op Either \c TGSI_OPCODE_SIN or \c TGSI_OPCODE_COS depending
714 * on which value is desired.
715 * \param dst Destination register
716 * \param src Source register
717 */
718 void
719 glsl_to_tgsi_visitor::emit_scs(ir_instruction *ir, unsigned op,
720 st_dst_reg dst,
721 const st_src_reg &src)
722 {
723 /* Vertex programs cannot use the SCS opcode.
724 */
725 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) {
726 emit_scalar(ir, op, dst, src);
727 return;
728 }
729
730 const unsigned component = (op == TGSI_OPCODE_SIN) ? 0 : 1;
731 const unsigned scs_mask = (1U << component);
732 int done_mask = ~dst.writemask;
733 st_src_reg tmp;
734
735 assert(op == TGSI_OPCODE_SIN || op == TGSI_OPCODE_COS);
736
737 /* If there are components in the destination that differ from the component
738 * that will be written by the SCS instruction, we'll need a temporary.
739 */
740 if (scs_mask != unsigned(dst.writemask)) {
741 tmp = get_temp(glsl_type::vec4_type);
742 }
743
744 for (unsigned i = 0; i < 4; i++) {
745 unsigned this_mask = (1U << i);
746 st_src_reg src0 = src;
747
748 if ((done_mask & this_mask) != 0)
749 continue;
750
751 /* The source swizzle specifies which component of the source generates
752 * sine / cosine for the current component in the destination. The SCS
753 * instruction requires that this value be swizzled into the X component.
754 * Replace the current swizzle with a swizzle that puts the source in
755 * the X component.
756 */
757 unsigned src0_swiz = GET_SWZ(src.swizzle, i);
758
759 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
760 src0_swiz, src0_swiz);
761 for (unsigned j = i + 1; j < 4; j++) {
762 /* If there is another enabled component in the destination that is
763 * derived from the same inputs, generate its value on this pass as
764 * well.
765 */
766 if (!(done_mask & (1 << j)) &&
767 GET_SWZ(src0.swizzle, j) == src0_swiz) {
768 this_mask |= (1 << j);
769 }
770 }
771
772 if (this_mask != scs_mask) {
773 glsl_to_tgsi_instruction *inst;
774 st_dst_reg tmp_dst = st_dst_reg(tmp);
775
776 /* Emit the SCS instruction.
777 */
778 inst = emit(ir, TGSI_OPCODE_SCS, tmp_dst, src0);
779 inst->dst.writemask = scs_mask;
780
781 /* Move the result of the SCS instruction to the desired location in
782 * the destination.
783 */
784 tmp.swizzle = MAKE_SWIZZLE4(component, component,
785 component, component);
786 inst = emit(ir, TGSI_OPCODE_MOV, dst, tmp);
787 inst->dst.writemask = this_mask;
788 } else {
789 /* Emit the SCS instruction to write directly to the destination.
790 */
791 glsl_to_tgsi_instruction *inst = emit(ir, TGSI_OPCODE_SCS, dst, src0);
792 inst->dst.writemask = scs_mask;
793 }
794
795 done_mask |= this_mask;
796 }
797 }
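/* A worked example of the loop above: writing cos(x) only to dst.z gives
 * scs_mask == WRITEMASK_Y but this_mask == WRITEMASK_Z, so a temporary is
 * used and the emitted sequence is roughly
 *
 *    SCS TEMP[t].y, x.xxxx
 *    MOV dst.z, TEMP[t].yyyy
 *
 * whereas cos(x) written straight to dst.y hits the else branch and needs
 * only the single SCS.
 */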
798
799 struct st_src_reg
800 glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
801 {
802 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_FLOAT);
803 union gl_constant_value uval;
804
805 uval.f = val;
806 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
807 &uval, 1, GL_FLOAT, &src.swizzle);
808
809 return src;
810 }
811
812 struct st_src_reg
813 glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
814 {
815 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_INT);
816 union gl_constant_value uval;
817
818 assert(glsl_version >= 130);
819
820 uval.i = val;
821 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
822 &uval, 1, GL_INT, &src.swizzle);
823
824 return src;
825 }
826
827 struct st_src_reg
828 glsl_to_tgsi_visitor::st_src_reg_for_type(int type, int val)
829 {
830 if (glsl_version >= 130)
831 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
832 st_src_reg_for_int(val);
833 else
834 return st_src_reg_for_float(val);
835 }
836
837 static int
838 type_size(const struct glsl_type *type)
839 {
840 unsigned int i;
841 int size;
842
843 switch (type->base_type) {
844 case GLSL_TYPE_UINT:
845 case GLSL_TYPE_INT:
846 case GLSL_TYPE_FLOAT:
847 case GLSL_TYPE_BOOL:
848 if (type->is_matrix()) {
849 return type->matrix_columns;
850 } else {
851 /* Regardless of size of vector, it gets a vec4. This is bad
852 * packing for things like floats, but otherwise arrays become a
853 * mess. Hopefully a later pass over the code can pack scalars
854 * down if appropriate.
855 */
856 return 1;
857 }
858 case GLSL_TYPE_ARRAY:
859 assert(type->length > 0);
860 return type_size(type->fields.array) * type->length;
861 case GLSL_TYPE_STRUCT:
862 size = 0;
863 for (i = 0; i < type->length; i++) {
864 size += type_size(type->fields.structure[i].type);
865 }
866 return size;
867 case GLSL_TYPE_SAMPLER:
868 /* Samplers take up one slot in UNIFORMS[], but they're baked in
869 * at link time.
870 */
871 return 1;
872 default:
873 assert(0);
874 return 0;
875 }
876 }
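/* Examples of the slot counts above: every scalar/vector takes one vec4 slot,
 * a matrix takes one slot per column, and arrays/structs are the sum of
 * their members:
 *
 *    assert(type_size(glsl_type::vec4_type) == 1);
 *    assert(type_size(glsl_type::mat4_type) == 4);
 *    // struct { mat3 m; float f; }  ->  3 + 1 == 4 slots
 */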
877
878 /**
879 * In the initial pass of codegen, we assign temporary numbers to
880 * intermediate results. (not SSA -- variable assignments will reuse
881 * storage).
882 */
883 st_src_reg
884 glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
885 {
886 st_src_reg src;
887 int swizzle[4];
888 int i;
889
890 src.type = glsl_version >= 130 ? type->base_type : GLSL_TYPE_FLOAT;
891 src.file = PROGRAM_TEMPORARY;
892 src.index = next_temp;
893 src.reladdr = NULL;
894 next_temp += type_size(type);
895
896 if (type->is_array() || type->is_record()) {
897 src.swizzle = SWIZZLE_NOOP;
898 } else {
899 for (i = 0; i < type->vector_elements; i++)
900 swizzle[i] = i;
901 for (; i < 4; i++)
902 swizzle[i] = type->vector_elements - 1;
903 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
904 swizzle[2], swizzle[3]);
905 }
906 src.negate = 0;
907
908 return src;
909 }
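/* e.g. (sketch): get_temp(glsl_type::vec2_type) returns a PROGRAM_TEMPORARY
 * at the current next_temp with swizzle .xyyy and advances next_temp by one
 * slot; an eight-element vec4 array would advance it by eight.
 */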
910
911 variable_storage *
912 glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
913 {
914
915 variable_storage *entry;
916
917 foreach_iter(exec_list_iterator, iter, this->variables) {
918 entry = (variable_storage *)iter.get();
919
920 if (entry->var == var)
921 return entry;
922 }
923
924 return NULL;
925 }
926
927 void
928 glsl_to_tgsi_visitor::visit(ir_variable *ir)
929 {
930 if (strcmp(ir->name, "gl_FragCoord") == 0) {
931 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
932
933 fp->OriginUpperLeft = ir->origin_upper_left;
934 fp->PixelCenterInteger = ir->pixel_center_integer;
935
936 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
937 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
938 switch (ir->depth_layout) {
939 case ir_depth_layout_none:
940 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
941 break;
942 case ir_depth_layout_any:
943 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
944 break;
945 case ir_depth_layout_greater:
946 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
947 break;
948 case ir_depth_layout_less:
949 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
950 break;
951 case ir_depth_layout_unchanged:
952 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
953 break;
954 default:
955 assert(0);
956 break;
957 }
958 }
959
960 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
961 unsigned int i;
962 const ir_state_slot *const slots = ir->state_slots;
963 assert(ir->state_slots != NULL);
964
965 /* Check if this statevar's setup in the STATE file exactly
966 * matches how we'll want to reference it as a
967 * struct/array/whatever. If not, then we need to move it into
968 * temporary storage and hope that it'll get copy-propagated
969 * out.
970 */
971 for (i = 0; i < ir->num_state_slots; i++) {
972 if (slots[i].swizzle != SWIZZLE_XYZW) {
973 break;
974 }
975 }
976
977 struct variable_storage *storage;
978 st_dst_reg dst;
979 if (i == ir->num_state_slots) {
980 /* We'll set the index later. */
981 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
982 this->variables.push_tail(storage);
983
984 dst = undef_dst;
985 } else {
986 /* The variable_storage constructor allocates slots based on the size
987 * of the type. However, this had better match the number of state
988 * elements that we're going to copy into the new temporary.
989 */
990 assert((int) ir->num_state_slots == type_size(ir->type));
991
992 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY,
993 this->next_temp);
994 this->variables.push_tail(storage);
995 this->next_temp += type_size(ir->type);
996
997 dst = st_dst_reg(st_src_reg(PROGRAM_TEMPORARY, storage->index,
998 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT));
999 }
1000
1001
1002 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
1003 int index = _mesa_add_state_reference(this->prog->Parameters,
1004 (gl_state_index *)slots[i].tokens);
1005
1006 if (storage->file == PROGRAM_STATE_VAR) {
1007 if (storage->index == -1) {
1008 storage->index = index;
1009 } else {
1010 assert(index == storage->index + (int)i);
1011 }
1012 } else {
1013 st_src_reg src(PROGRAM_STATE_VAR, index,
1014 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT);
1015 src.swizzle = slots[i].swizzle;
1016 emit(ir, TGSI_OPCODE_MOV, dst, src);
1017 /* even a float takes up a whole vec4 reg in a struct/array. */
1018 dst.index++;
1019 }
1020 }
1021
1022 if (storage->file == PROGRAM_TEMPORARY &&
1023 dst.index != storage->index + (int) ir->num_state_slots) {
1024 fail_link(this->shader_program,
1025 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
1026 ir->name, dst.index - storage->index,
1027 type_size(ir->type));
1028 }
1029 }
1030 }
1031
1032 void
1033 glsl_to_tgsi_visitor::visit(ir_loop *ir)
1034 {
1035 ir_dereference_variable *counter = NULL;
1036
1037 if (ir->counter != NULL)
1038 counter = new(ir) ir_dereference_variable(ir->counter);
1039
1040 if (ir->from != NULL) {
1041 assert(ir->counter != NULL);
1042
1043 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL);
1044
1045 a->accept(this);
1046 delete a;
1047 }
1048
1049 emit(NULL, TGSI_OPCODE_BGNLOOP);
1050
1051 if (ir->to) {
1052 ir_expression *e =
1053 new(ir) ir_expression(ir->cmp, glsl_type::bool_type,
1054 counter, ir->to);
1055 ir_if *if_stmt = new(ir) ir_if(e);
1056
1057 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
1058
1059 if_stmt->then_instructions.push_tail(brk);
1060
1061 if_stmt->accept(this);
1062
1063 delete if_stmt;
1064 delete e;
1065 delete brk;
1066 }
1067
1068 visit_exec_list(&ir->body_instructions, this);
1069
1070 if (ir->increment) {
1071 ir_expression *e =
1072 new(ir) ir_expression(ir_binop_add, counter->type,
1073 counter, ir->increment);
1074
1075 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL);
1076
1077 a->accept(this);
1078 delete a;
1079 delete e;
1080 }
1081
1082 emit(NULL, TGSI_OPCODE_ENDLOOP);
1083 }
1084
1085 void
1086 glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
1087 {
1088 switch (ir->mode) {
1089 case ir_loop_jump::jump_break:
1090 emit(NULL, TGSI_OPCODE_BRK);
1091 break;
1092 case ir_loop_jump::jump_continue:
1093 emit(NULL, TGSI_OPCODE_CONT);
1094 break;
1095 }
1096 }
1097
1098
1099 void
1100 glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
1101 {
1102 assert(0);
1103 (void)ir;
1104 }
1105
1106 void
1107 glsl_to_tgsi_visitor::visit(ir_function *ir)
1108 {
1109 /* Ignore function bodies other than main() -- we shouldn't see calls to
1110 * them since they should all be inlined before we get to glsl_to_tgsi.
1111 */
1112 if (strcmp(ir->name, "main") == 0) {
1113 const ir_function_signature *sig;
1114 exec_list empty;
1115
1116 sig = ir->matching_signature(&empty);
1117
1118 assert(sig);
1119
1120 foreach_iter(exec_list_iterator, iter, sig->body) {
1121 ir_instruction *ir = (ir_instruction *)iter.get();
1122
1123 ir->accept(this);
1124 }
1125 }
1126 }
1127
1128 GLboolean
1129 glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
1130 {
1131 int nonmul_operand = 1 - mul_operand;
1132 st_src_reg a, b, c;
1133
1134 ir_expression *expr = ir->operands[mul_operand]->as_expression();
1135 if (!expr || expr->operation != ir_binop_mul)
1136 return false;
1137
1138 expr->operands[0]->accept(this);
1139 a = this->result;
1140 expr->operands[1]->accept(this);
1141 b = this->result;
1142 ir->operands[nonmul_operand]->accept(this);
1143 c = this->result;
1144
1145 this->result = get_temp(ir->type);
1146 emit(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, c);
1147
1148 return true;
1149 }
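/* Sketch: for "a * b + c" the add visit below first tries
 * try_emit_mad(ir, 1) (no multiply in operand 1), then try_emit_mad(ir, 0)
 * matches the ir_binop_mul and collapses the tree into a single
 *
 *    MAD TEMP[t], a, b, c
 *
 * instead of a MUL followed by an ADD.
 */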
1150
1151 GLboolean
1152 glsl_to_tgsi_visitor::try_emit_sat(ir_expression *ir)
1153 {
1154 /* Saturates were only introduced to vertex programs in
1155 * NV_vertex_program3, so don't give them to drivers in the VP.
1156 */
1157 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
1158 return false;
1159
1160 ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
1161 if (!sat_src)
1162 return false;
1163
1164 sat_src->accept(this);
1165 st_src_reg src = this->result;
1166
1167 this->result = get_temp(ir->type);
1168 glsl_to_tgsi_instruction *inst;
1169 inst = emit(ir, TGSI_OPCODE_MOV, st_dst_reg(this->result), src);
1170 inst->saturate = true;
1171
1172 return true;
1173 }
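/* Sketch: in a fragment program, an expression in saturate form such as
 * min(max(x, 0.0), 1.0) is recognized by as_rvalue_to_saturate() and becomes
 * a single saturated move,
 *
 *    MOV_SAT TEMP[t], x
 *
 * rather than separate MAX/MIN instructions.
 */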
1174
1175 void
1176 glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
1177 st_src_reg *reg, int *num_reladdr)
1178 {
1179 if (!reg->reladdr)
1180 return;
1181
1182 emit_arl(ir, address_reg, *reg->reladdr);
1183
1184 if (*num_reladdr != 1) {
1185 st_src_reg temp = get_temp(glsl_type::vec4_type);
1186
1187 emit(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
1188 *reg = temp;
1189 }
1190
1191 (*num_reladdr)--;
1192 }
1193
1194 void
1195 glsl_to_tgsi_visitor::visit(ir_expression *ir)
1196 {
1197 unsigned int operand;
1198 st_src_reg op[Elements(ir->operands)];
1199 st_src_reg result_src;
1200 st_dst_reg result_dst;
1201
1202 /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
1203 */
1204 if (ir->operation == ir_binop_add) {
1205 if (try_emit_mad(ir, 1))
1206 return;
1207 if (try_emit_mad(ir, 0))
1208 return;
1209 }
1210 if (try_emit_sat(ir))
1211 return;
1212
1213 if (ir->operation == ir_quadop_vector)
1214 assert(!"ir_quadop_vector should have been lowered");
1215
1216 for (operand = 0; operand < ir->get_num_operands(); operand++) {
1217 this->result.file = PROGRAM_UNDEFINED;
1218 ir->operands[operand]->accept(this);
1219 if (this->result.file == PROGRAM_UNDEFINED) {
1220 ir_print_visitor v;
1221 printf("Failed to get tree for expression operand:\n");
1222 ir->operands[operand]->accept(&v);
1223 exit(1);
1224 }
1225 op[operand] = this->result;
1226
1227 /* Matrix expression operands should have been broken down to vector
1228 * operations already.
1229 */
1230 assert(!ir->operands[operand]->type->is_matrix());
1231 }
1232
1233 int vector_elements = ir->operands[0]->type->vector_elements;
1234 if (ir->operands[1]) {
1235 vector_elements = MAX2(vector_elements,
1236 ir->operands[1]->type->vector_elements);
1237 }
1238
1239 this->result.file = PROGRAM_UNDEFINED;
1240
1241 /* Storage for our result. Ideally for an assignment we'd be using
1242 * the actual storage for the result here, instead.
1243 */
1244 result_src = get_temp(ir->type);
1245 /* convenience for the emit functions below. */
1246 result_dst = st_dst_reg(result_src);
1247 /* Limit writes to the channels that will be used by result_src later.
1248 * This does limit this temp's use as a temporary for multi-instruction
1249 * sequences.
1250 */
1251 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1252
1253 switch (ir->operation) {
1254 case ir_unop_logic_not:
1255 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], st_src_reg_for_type(result_dst.type, 0));
1256 break;
1257 case ir_unop_neg:
1258 assert(result_dst.type == GLSL_TYPE_FLOAT || result_dst.type == GLSL_TYPE_INT);
1259 if (result_dst.type == GLSL_TYPE_INT)
1260 emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
1261 else {
1262 op[0].negate = ~op[0].negate;
1263 result_src = op[0];
1264 }
1265 break;
1266 case ir_unop_abs:
1267 assert(result_dst.type == GLSL_TYPE_FLOAT);
1268 emit(ir, TGSI_OPCODE_ABS, result_dst, op[0]);
1269 break;
1270 case ir_unop_sign:
1271 emit(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
1272 break;
1273 case ir_unop_rcp:
1274 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
1275 break;
1276
1277 case ir_unop_exp2:
1278 emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
1279 break;
1280 case ir_unop_exp:
1281 case ir_unop_log:
1282 assert(!"not reached: should be handled by ir_explog_to_explog2");
1283 break;
1284 case ir_unop_log2:
1285 emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
1286 break;
1287 case ir_unop_sin:
1288 emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1289 break;
1290 case ir_unop_cos:
1291 emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1292 break;
1293 case ir_unop_sin_reduced:
1294 emit_scs(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1295 break;
1296 case ir_unop_cos_reduced:
1297 emit_scs(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1298 break;
1299
1300 case ir_unop_dFdx:
1301 emit(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
1302 break;
1303 case ir_unop_dFdy:
1304 op[0].negate = ~op[0].negate;
1305 emit(ir, TGSI_OPCODE_DDY, result_dst, op[0]);
1306 break;
1307
1308 case ir_unop_noise: {
1309 /* At some point, a motivated person could add a better
1310 * implementation of noise. Currently not even the nvidia
1311 * binary drivers do anything more than this. In any case, the
1312 * place to do this is in the GL state tracker, not the poor
1313 * driver.
1314 */
1315 emit(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
1316 break;
1317 }
1318
1319 case ir_binop_add:
1320 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1321 break;
1322 case ir_binop_sub:
1323 emit(ir, TGSI_OPCODE_SUB, result_dst, op[0], op[1]);
1324 break;
1325
1326 case ir_binop_mul:
1327 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1328 break;
1329 case ir_binop_div:
1330 if (result_dst.type == GLSL_TYPE_FLOAT)
1331 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
1332 else
1333 emit(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
1334 break;
1335 case ir_binop_mod:
1336 if (result_dst.type == GLSL_TYPE_FLOAT)
1337 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1338 else
1339 emit(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
1340 break;
1341
1342 case ir_binop_less:
1343 emit(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
1344 break;
1345 case ir_binop_greater:
1346 emit(ir, TGSI_OPCODE_SGT, result_dst, op[0], op[1]);
1347 break;
1348 case ir_binop_lequal:
1349 emit(ir, TGSI_OPCODE_SLE, result_dst, op[0], op[1]);
1350 break;
1351 case ir_binop_gequal:
1352 emit(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
1353 break;
1354 case ir_binop_equal:
1355 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1356 break;
1357 case ir_binop_nequal:
1358 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1359 break;
1360 case ir_binop_all_equal:
1361 /* "==" operator producing a scalar boolean. */
1362 if (ir->operands[0]->type->is_vector() ||
1363 ir->operands[1]->type->is_vector()) {
1364 st_src_reg temp = get_temp(glsl_version >= 130 ?
1365 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1366 glsl_type::vec4_type);
1367 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1368 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1369 emit_dp(ir, result_dst, temp, temp, vector_elements);
1370 emit(ir, TGSI_OPCODE_SEQ, result_dst, result_src, st_src_reg_for_float(0.0));
1371 } else {
1372 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1373 }
1374 break;
1375 case ir_binop_any_nequal:
1376 /* "!=" operator producing a scalar boolean. */
1377 if (ir->operands[0]->type->is_vector() ||
1378 ir->operands[1]->type->is_vector()) {
1379 st_src_reg temp = get_temp(glsl_version >= 130 ?
1380 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1381 glsl_type::vec4_type);
1382 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1383 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1384 emit_dp(ir, result_dst, temp, temp, vector_elements);
1385 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1386 } else {
1387 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1388 }
1389 break;
1390
1391 case ir_unop_any:
1392 assert(ir->operands[0]->type->is_vector());
1393 emit_dp(ir, result_dst, op[0], op[0],
1394 ir->operands[0]->type->vector_elements);
1395 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1396 break;
1397
1398 case ir_binop_logic_xor:
1399 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1400 break;
1401
1402 case ir_binop_logic_or:
1403 /* This could be a saturated add and skip the SNE. */
1404 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1405 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1406 break;
1407
1408 case ir_binop_logic_and:
1409 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
1410 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1411 break;
1412
1413 case ir_binop_dot:
1414 assert(ir->operands[0]->type->is_vector());
1415 assert(ir->operands[0]->type == ir->operands[1]->type);
1416 emit_dp(ir, result_dst, op[0], op[1],
1417 ir->operands[0]->type->vector_elements);
1418 break;
1419
1420 case ir_unop_sqrt:
1421 /* sqrt(x) = x * rsq(x). */
1422 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1423 emit(ir, TGSI_OPCODE_MUL, result_dst, result_src, op[0]);
1424 /* For incoming channels <= 0, set the result to 0. */
1425 op[0].negate = ~op[0].negate;
1426 emit(ir, TGSI_OPCODE_CMP, result_dst,
1427 op[0], result_src, st_src_reg_for_float(0.0));
1428 break;
1429 case ir_unop_rsq:
1430 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1431 break;
1432 case ir_unop_i2f:
1433 case ir_unop_b2f:
1434 if (glsl_version >= 130) {
1435 emit(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
1436 break;
1437 }
1438 case ir_unop_b2i:
1439 /* Booleans are stored as integers (or floats in GLSL 1.20 and lower). */
1440 result_src = op[0];
1441 break;
1442 case ir_unop_f2i:
1443 if (glsl_version >= 130)
1444 emit(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
1445 else
1446 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1447 break;
1448 case ir_unop_f2b:
1449 case ir_unop_i2b:
1450 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1451 st_src_reg_for_type(result_dst.type, 0));
1452 break;
1453 case ir_unop_trunc:
1454 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1455 break;
1456 case ir_unop_ceil:
1457 op[0].negate = ~op[0].negate;
1458 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1459 result_src.negate = ~result_src.negate;
1460 break;
1461 case ir_unop_floor:
1462 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1463 break;
1464 case ir_unop_fract:
1465 emit(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
1466 break;
1467
1468 case ir_binop_min:
1469 emit(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
1470 break;
1471 case ir_binop_max:
1472 emit(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
1473 break;
1474 case ir_binop_pow:
1475 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
1476 break;
1477
1478 case ir_unop_bit_not:
1479 if (glsl_version >= 130) {
1480 emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1481 break;
1482 }
1483 case ir_unop_u2f:
1484 if (glsl_version >= 130) {
1485 emit(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
1486 break;
1487 }
1488 case ir_binop_lshift:
1489 if (glsl_version >= 130) {
1490 emit(ir, TGSI_OPCODE_SHL, result_dst, op[0], op[1]);
1491 break;
1492 }
1493 case ir_binop_rshift:
1494 if (glsl_version >= 130) {
1495 emit(ir, TGSI_OPCODE_ISHR, result_dst, op[0], op[1]);
1496 break;
1497 }
1498 case ir_binop_bit_and:
1499 if (glsl_version >= 130) {
1500 emit(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
1501 break;
1502 }
1503 case ir_binop_bit_xor:
1504 if (glsl_version >= 130) {
1505 emit(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1506 break;
1507 }
1508 case ir_binop_bit_or:
1509 if (glsl_version >= 130) {
1510 emit(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1511 break;
1512 }
1513 case ir_unop_round_even:
1514 assert(!"GLSL 1.30 features unsupported");
1515 break;
1516
1517 case ir_quadop_vector:
1518 /* This operation should have already been handled.
1519 */
1520 assert(!"Should not get here.");
1521 break;
1522 }
1523
1524 this->result = result_src;
1525 }
1526
1527
1528 void
1529 glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
1530 {
1531 st_src_reg src;
1532 int i;
1533 int swizzle[4];
1534
1535 /* Note that this is only swizzles in expressions, not those on the left
1536 * hand side of an assignment, which do write masking. See ir_assignment
1537 * for that.
1538 */
1539
1540 ir->val->accept(this);
1541 src = this->result;
1542 assert(src.file != PROGRAM_UNDEFINED);
1543
1544 for (i = 0; i < 4; i++) {
1545 if (i < ir->type->vector_elements) {
1546 switch (i) {
1547 case 0:
1548 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
1549 break;
1550 case 1:
1551 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
1552 break;
1553 case 2:
1554 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
1555 break;
1556 case 3:
1557 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
1558 break;
1559 }
1560 } else {
1561 /* If the type is smaller than a vec4, replicate the last
1562 * channel out.
1563 */
1564 swizzle[i] = swizzle[ir->type->vector_elements - 1];
1565 }
1566 }
1567
1568 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1569
1570 this->result = src;
1571 }
1572
1573 void
1574 glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
1575 {
1576 variable_storage *entry = find_variable_storage(ir->var);
1577 ir_variable *var = ir->var;
1578
1579 if (!entry) {
1580 switch (var->mode) {
1581 case ir_var_uniform:
1582 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
1583 var->location);
1584 this->variables.push_tail(entry);
1585 break;
1586 case ir_var_in:
1587 case ir_var_inout:
1588 /* The linker assigns locations for varyings and attributes,
1588 * including deprecated builtins (like gl_Color), user-assigned
1589 * generic attributes (glBindAttribLocation), and
1591 * user-defined varyings.
1592 *
1593 * FINISHME: We would hit this path for function arguments. Fix!
1594 */
1595 assert(var->location != -1);
1596 entry = new(mem_ctx) variable_storage(var,
1597 PROGRAM_INPUT,
1598 var->location);
1599 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1600 var->location >= VERT_ATTRIB_GENERIC0) {
1601 _mesa_add_attribute(this->prog->Attributes,
1602 var->name,
1603 _mesa_sizeof_glsl_type(var->type->gl_type),
1604 var->type->gl_type,
1605 var->location - VERT_ATTRIB_GENERIC0);
1606 }
1607 break;
1608 case ir_var_out:
1609 assert(var->location != -1);
1610 entry = new(mem_ctx) variable_storage(var,
1611 PROGRAM_OUTPUT,
1612 var->location);
1613 break;
1614 case ir_var_system_value:
1615 entry = new(mem_ctx) variable_storage(var,
1616 PROGRAM_SYSTEM_VALUE,
1617 var->location);
1618 break;
1619 case ir_var_auto:
1620 case ir_var_temporary:
1621 entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY,
1622 this->next_temp);
1623 this->variables.push_tail(entry);
1624
1625 next_temp += type_size(var->type);
1626 break;
1627 }
1628
1629 if (!entry) {
1630 printf("Failed to make storage for %s\n", var->name);
1631 exit(1);
1632 }
1633 }
1634
1635 this->result = st_src_reg(entry->file, entry->index, var->type);
1636 if (glsl_version <= 120)
1637 this->result.type = GLSL_TYPE_FLOAT;
1638 }
1639
1640 void
1641 glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
1642 {
1643 ir_constant *index;
1644 st_src_reg src;
1645 int element_size = type_size(ir->type);
1646
1647 index = ir->array_index->constant_expression_value();
1648
1649 ir->array->accept(this);
1650 src = this->result;
1651
1652 if (index) {
1653 src.index += index->value.i[0] * element_size;
1654 } else {
1655 st_src_reg array_base = this->result;
1656 /* Variable index array dereference. It eats the "vec4" of the
1657 * base of the array and an index that offsets the Mesa register
1658 * index.
1659 */
1660 ir->array_index->accept(this);
1661
1662 st_src_reg index_reg;
1663
1664 if (element_size == 1) {
1665 index_reg = this->result;
1666 } else {
1667 index_reg = get_temp(glsl_type::float_type);
1668
1669 emit(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
1670 this->result, st_src_reg_for_float(element_size));
1671 }
1672
1673 src.reladdr = ralloc(mem_ctx, st_src_reg);
1674 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
1675 }
1676
1677 /* If the type is smaller than a vec4, replicate the last channel out. */
1678 if (ir->type->is_scalar() || ir->type->is_vector())
1679 src.swizzle = swizzle_for_size(ir->type->vector_elements);
1680 else
1681 src.swizzle = SWIZZLE_NOOP;
1682
1683 this->result = src;
1684 }
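/* Sketch of the variable-index path: for arr[i] with vec4 elements
 * (element_size == 1) the index expression itself becomes src.reladdr, and
 * emit()/emit_arl() later turn the access into roughly
 *
 *    ARL ADDR[0].x, i
 *    MOV dst, TEMP[base + ADDR[0].x]
 *
 * For larger elements (e.g. mat4, element_size == 4) the index is first
 * scaled by the element size with the MUL emitted above.
 */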
1685
1686 void
1687 glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
1688 {
1689 unsigned int i;
1690 const glsl_type *struct_type = ir->record->type;
1691 int offset = 0;
1692
1693 ir->record->accept(this);
1694
1695 for (i = 0; i < struct_type->length; i++) {
1696 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1697 break;
1698 offset += type_size(struct_type->fields.structure[i].type);
1699 }
1700
1701 /* If the type is smaller than a vec4, replicate the last channel out. */
1702 if (ir->type->is_scalar() || ir->type->is_vector())
1703 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1704 else
1705 this->result.swizzle = SWIZZLE_NOOP;
1706
1707 this->result.index += offset;
1708 }
1709
1710 /**
1711 * We want to be careful in assignment setup to hit the actual storage
1712 * instead of potentially using a temporary like we might with the
1713 * ir_dereference handler.
1714 */
1715 static st_dst_reg
1716 get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v)
1717 {
1718 /* The LHS must be a dereference. If the LHS is a variable indexed array
1719 * access of a vector, it must be separated into a series of conditional moves
1720 * before reaching this point (see ir_vec_index_to_cond_assign).
1721 */
1722 assert(ir->as_dereference());
1723 ir_dereference_array *deref_array = ir->as_dereference_array();
1724 if (deref_array) {
1725 assert(!deref_array->array->type->is_vector());
1726 }
1727
1728 /* Use the rvalue deref handler for the most part. We'll ignore
1729 * swizzles in it and write swizzles using writemask, though.
1730 */
1731 ir->accept(v);
1732 return st_dst_reg(v->result);
1733 }
1734
1735 /**
1736 * Process the condition of a conditional assignment
1737 *
1738 * Examines the condition of a conditional assignment to generate the optimal
1739 * first operand of a \c CMP instruction. If the condition is a relational
1740 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
1741 * used as the source for the \c CMP instruction. Otherwise the comparison
1742 * is processed to a boolean result, and the boolean result is used as the
1743 * operand to the CMP instruction.
1744 */
1745 bool
1746 glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
1747 {
1748 ir_rvalue *src_ir = ir;
1749 bool negate = true;
1750 bool switch_order = false;
1751
1752 ir_expression *const expr = ir->as_expression();
1753 if ((expr != NULL) && (expr->get_num_operands() == 2)) {
1754 bool zero_on_left = false;
1755
1756 if (expr->operands[0]->is_zero()) {
1757 src_ir = expr->operands[1];
1758 zero_on_left = true;
1759 } else if (expr->operands[1]->is_zero()) {
1760 src_ir = expr->operands[0];
1761 zero_on_left = false;
1762 }
1763
1764 * a is       -  0  +            -  0  +
1765 * (a <  0)   T  F  F   ( a < 0)  T  F  F
1766 * (0 <  a)   F  F  T   (-a < 0)  F  F  T
1767 * (a <= 0)   T  T  F   (-a < 0)  F  F  T   (swap order of other operands)
1768 * (0 <= a)   F  T  T   ( a < 0)  T  F  F   (swap order of other operands)
1769 * (a >  0)   F  F  T   (-a < 0)  F  F  T
1770 * (0 >  a)   T  F  F   ( a < 0)  T  F  F
1771 * (a >= 0)   F  T  T   ( a < 0)  T  F  F   (swap order of other operands)
1772 * (0 >= a)   T  T  F   (-a < 0)  F  F  T   (swap order of other operands)
1773 *
1774 * Note that exchanging the order of 0 and 'a' in the comparison simply
1775 * means that the value of 'a' should be negated.
1776 */
1777 if (src_ir != ir) {
1778 switch (expr->operation) {
1779 case ir_binop_less:
1780 switch_order = false;
1781 negate = zero_on_left;
1782 break;
1783
1784 case ir_binop_greater:
1785 switch_order = false;
1786 negate = !zero_on_left;
1787 break;
1788
1789 case ir_binop_lequal:
1790 switch_order = true;
1791 negate = !zero_on_left;
1792 break;
1793
1794 case ir_binop_gequal:
1795 switch_order = true;
1796 negate = zero_on_left;
1797 break;
1798
1799 default:
1800 /* This isn't the right kind of comparison after all, so make sure
1801 * the whole condition is visited.
1802 */
1803 src_ir = ir;
1804 break;
1805 }
1806 }
1807 }
1808
1809 src_ir->accept(this);
1810
1811 /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
1812 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
1813 * choose which value TGSI_OPCODE_CMP produces without an extra instruction
1814 * computing the condition.
1815 */
1816 if (negate)
1817 this->result.negate = ~this->result.negate;
1818
1819 return switch_order;
1820 }
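/* Worked example of the table above: for a conditional assignment guarded by
 * (a > 0.0), zero is on the right, so src_ir becomes just "a", negate stays
 * true and switch_order stays false. visit(ir_assignment) then emits
 *
 *    CMP dst, -a, rhs, dst
 *
 * and since CMP selects src1 when src0 < 0, rhs is chosen exactly when
 * -a < 0, i.e. when a > 0.
 */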
1821
1822 void
1823 glsl_to_tgsi_visitor::visit(ir_assignment *ir)
1824 {
1825 st_dst_reg l;
1826 st_src_reg r;
1827 int i;
1828
1829 ir->rhs->accept(this);
1830 r = this->result;
1831
1832 l = get_assignment_lhs(ir->lhs, this);
1833
1834 /* FINISHME: This should really be set to the correct maximal writemask for each
1835 * FINISHME: component written (in the loops below). This case can only
1836 * FINISHME: occur for matrices, arrays, and structures.
1837 */
1838 if (ir->write_mask == 0) {
1839 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
1840 l.writemask = WRITEMASK_XYZW;
1841 } else if (ir->lhs->type->is_scalar()) {
1842 /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
1843 * FINISHME: W component of fragment shader output zero, work correctly.
1844 */
1845 l.writemask = WRITEMASK_XYZW;
1846 } else {
1847 int swizzles[4];
1848 int first_enabled_chan = 0;
1849 int rhs_chan = 0;
1850
1851 assert(ir->lhs->type->is_vector());
1852 l.writemask = ir->write_mask;
1853
1854 for (int i = 0; i < 4; i++) {
1855 if (l.writemask & (1 << i)) {
1856 first_enabled_chan = GET_SWZ(r.swizzle, i);
1857 break;
1858 }
1859 }
1860
1861 /* Swizzle a small RHS vector into the channels being written.
1862 *
1863 * glsl ir treats write_mask as dictating how many channels are
1864 * present on the RHS while Mesa IR treats write_mask as just
1865 * showing which channels of the vec4 RHS get written.
1866 */
1867 for (int i = 0; i < 4; i++) {
1868 if (l.writemask & (1 << i))
1869 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
1870 else
1871 swizzles[i] = first_enabled_chan;
1872 }
1873 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
1874 swizzles[2], swizzles[3]);
1875 }
1876
1877 assert(l.file != PROGRAM_UNDEFINED);
1878 assert(r.file != PROGRAM_UNDEFINED);
1879
1880 if (ir->condition) {
1881 const bool switch_order = this->process_move_condition(ir->condition);
1882 st_src_reg condition = this->result;
1883
1884 for (i = 0; i < type_size(ir->lhs->type); i++) {
1885 st_src_reg l_src = st_src_reg(l);
1886 l_src.swizzle = swizzle_for_size(ir->lhs->type->vector_elements);
1887
1888 if (switch_order) {
1889 emit(ir, TGSI_OPCODE_CMP, l, condition, l_src, r);
1890 } else {
1891 emit(ir, TGSI_OPCODE_CMP, l, condition, r, l_src);
1892 }
1893
1894 l.index++;
1895 r.index++;
1896 }
1897 } else {
1898 for (i = 0; i < type_size(ir->lhs->type); i++) {
1899 emit(ir, TGSI_OPCODE_MOV, l, r);
1900 l.index++;
1901 r.index++;
1902 }
1903 }
1904 }
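/* Worked example of the RHS re-swizzle above: for "v.yw = u.xy;" the write
 * mask is YW and the RHS arrives swizzled .xyyy, so the loop builds the
 * swizzle .yxyy and emits
 *
 *    MOV v.yw, u.yxyy
 *
 * which writes u.x into v.y and u.y into v.w.
 */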
1905
1906
1907 void
1908 glsl_to_tgsi_visitor::visit(ir_constant *ir)
1909 {
1910 st_src_reg src;
1911 GLfloat stack_vals[4] = { 0 };
1912 gl_constant_value *values = (gl_constant_value *) stack_vals;
1913 GLenum gl_type = GL_NONE;
1914 unsigned int i;
1915
1916 /* Unfortunately, 4 floats is all we can get into
1917 * _mesa_add_unnamed_constant. So, make a temp to store an
1918 * aggregate constant and move each constant value into it. If we
1919 * get lucky, copy propagation will eliminate the extra moves.
1920 */
1921 if (ir->type->base_type == GLSL_TYPE_STRUCT) {
1922 st_src_reg temp_base = get_temp(ir->type);
1923 st_dst_reg temp = st_dst_reg(temp_base);
1924
1925 foreach_iter(exec_list_iterator, iter, ir->components) {
1926 ir_constant *field_value = (ir_constant *)iter.get();
1927 int size = type_size(field_value->type);
1928
1929 assert(size > 0);
1930
1931 field_value->accept(this);
1932 src = this->result;
1933
1934 for (i = 0; i < (unsigned int)size; i++) {
1935 emit(ir, TGSI_OPCODE_MOV, temp, src);
1936
1937 src.index++;
1938 temp.index++;
1939 }
1940 }
1941 this->result = temp_base;
1942 return;
1943 }
1944
1945 if (ir->type->is_array()) {
1946 st_src_reg temp_base = get_temp(ir->type);
1947 st_dst_reg temp = st_dst_reg(temp_base);
1948 int size = type_size(ir->type->fields.array);
1949
1950 assert(size > 0);
1951
1952 for (i = 0; i < ir->type->length; i++) {
1953 ir->array_elements[i]->accept(this);
1954 src = this->result;
1955 for (int j = 0; j < size; j++) {
1956 emit(ir, TGSI_OPCODE_MOV, temp, src);
1957
1958 src.index++;
1959 temp.index++;
1960 }
1961 }
1962 this->result = temp_base;
1963 return;
1964 }
1965
1966 if (ir->type->is_matrix()) {
1967 st_src_reg mat = get_temp(ir->type);
1968 st_dst_reg mat_column = st_dst_reg(mat);
1969
1970 for (i = 0; i < ir->type->matrix_columns; i++) {
1971 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1972 values = (gl_constant_value *) &ir->value.f[i * ir->type->vector_elements];
1973
1974 src = st_src_reg(PROGRAM_CONSTANT, -1, ir->type->base_type);
1975 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
1976 values,
1977 ir->type->vector_elements,
1978 GL_FLOAT,
1979 &src.swizzle);
1980 emit(ir, TGSI_OPCODE_MOV, mat_column, src);
1981
1982 mat_column.index++;
1983 }
1984
1985 this->result = mat;
1986 return;
1987 }
1988
1989 src.file = PROGRAM_CONSTANT;
1990 switch (ir->type->base_type) {
1991 case GLSL_TYPE_FLOAT:
1992 gl_type = GL_FLOAT;
1993 for (i = 0; i < ir->type->vector_elements; i++) {
1994 values[i].f = ir->value.f[i];
1995 }
1996 break;
1997 case GLSL_TYPE_UINT:
1998 gl_type = glsl_version >= 130 ? GL_UNSIGNED_INT : GL_FLOAT;
1999 for (i = 0; i < ir->type->vector_elements; i++) {
2000 if (glsl_version >= 130)
2001 values[i].u = ir->value.u[i];
2002 else
2003 values[i].f = ir->value.u[i];
2004 }
2005 break;
2006 case GLSL_TYPE_INT:
2007 gl_type = glsl_version >= 130 ? GL_INT : GL_FLOAT;
2008 for (i = 0; i < ir->type->vector_elements; i++) {
2009 if (glsl_version >= 130)
2010 values[i].i = ir->value.i[i];
2011 else
2012 values[i].f = ir->value.i[i];
2013 }
2014 break;
2015 case GLSL_TYPE_BOOL:
2016 gl_type = glsl_version >= 130 ? GL_BOOL : GL_FLOAT;
2017 for (i = 0; i < ir->type->vector_elements; i++) {
2018 if (glsl_version >= 130)
2019 values[i].b = ir->value.b[i];
2020 else
2021 values[i].f = ir->value.b[i];
2022 }
2023 break;
2024 default:
2025 assert(!"Non-float/uint/int/bool constant");
2026 }
2027
2028 this->result = st_src_reg(PROGRAM_CONSTANT, -1, ir->type);
2029 this->result.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
2030 values, ir->type->vector_elements, gl_type,
2031 &this->result.swizzle);
2032 }
2033
2034 function_entry *
2035 glsl_to_tgsi_visitor::get_function_signature(ir_function_signature *sig)
2036 {
2037 function_entry *entry;
2038
2039 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
2040 entry = (function_entry *)iter.get();
2041
2042 if (entry->sig == sig)
2043 return entry;
2044 }
2045
2046 entry = ralloc(mem_ctx, function_entry);
2047 entry->sig = sig;
2048 entry->sig_id = this->next_signature_id++;
2049 entry->bgn_inst = NULL;
2050
2051 /* Allocate storage for all the parameters. */
2052 foreach_iter(exec_list_iterator, iter, sig->parameters) {
2053 ir_variable *param = (ir_variable *)iter.get();
2054 variable_storage *storage;
2055
2056 storage = find_variable_storage(param);
2057 assert(!storage);
2058
2059 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
2060 this->next_temp);
2061 this->variables.push_tail(storage);
2062
2063 this->next_temp += type_size(param->type);
2064 }
2065
2066 if (!sig->return_type->is_void()) {
2067 entry->return_reg = get_temp(sig->return_type);
2068 } else {
2069 entry->return_reg = undef_src;
2070 }
2071
2072 this->function_signatures.push_tail(entry);
2073 return entry;
2074 }
2075
2076 void
2077 glsl_to_tgsi_visitor::visit(ir_call *ir)
2078 {
2079 glsl_to_tgsi_instruction *call_inst;
2080 ir_function_signature *sig = ir->get_callee();
2081 function_entry *entry = get_function_signature(sig);
2082 int i;
2083
2084 /* Process in parameters. */
2085 exec_list_iterator sig_iter = sig->parameters.iterator();
2086 foreach_iter(exec_list_iterator, iter, *ir) {
2087 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2088 ir_variable *param = (ir_variable *)sig_iter.get();
2089
2090 if (param->mode == ir_var_in ||
2091 param->mode == ir_var_inout) {
2092 variable_storage *storage = find_variable_storage(param);
2093 assert(storage);
2094
2095 param_rval->accept(this);
2096 st_src_reg r = this->result;
2097
2098 st_dst_reg l;
2099 l.file = storage->file;
2100 l.index = storage->index;
2101 l.reladdr = NULL;
2102 l.writemask = WRITEMASK_XYZW;
2103 l.cond_mask = COND_TR;
2104
2105 for (i = 0; i < type_size(param->type); i++) {
2106 emit(ir, TGSI_OPCODE_MOV, l, r);
2107 l.index++;
2108 r.index++;
2109 }
2110 }
2111
2112 sig_iter.next();
2113 }
2114 assert(!sig_iter.has_next());
2115
2116 /* Emit call instruction */
2117 call_inst = emit(ir, TGSI_OPCODE_CAL);
2118 call_inst->function = entry;
2119
2120 /* Process out parameters. */
2121 sig_iter = sig->parameters.iterator();
2122 foreach_iter(exec_list_iterator, iter, *ir) {
2123 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2124 ir_variable *param = (ir_variable *)sig_iter.get();
2125
2126 if (param->mode == ir_var_out ||
2127 param->mode == ir_var_inout) {
2128 variable_storage *storage = find_variable_storage(param);
2129 assert(storage);
2130
2131 st_src_reg r;
2132 r.file = storage->file;
2133 r.index = storage->index;
2134 r.reladdr = NULL;
2135 r.swizzle = SWIZZLE_NOOP;
2136 r.negate = 0;
2137
2138 param_rval->accept(this);
2139 st_dst_reg l = st_dst_reg(this->result);
2140
2141 for (i = 0; i < type_size(param->type); i++) {
2142 emit(ir, TGSI_OPCODE_MOV, l, r);
2143 l.index++;
2144 r.index++;
2145 }
2146 }
2147
2148 sig_iter.next();
2149 }
2150 assert(!sig_iter.has_next());
2151
2152 /* Process return value. */
2153 this->result = entry->return_reg;
2154 }
2155
2156 void
2157 glsl_to_tgsi_visitor::visit(ir_texture *ir)
2158 {
2159 st_src_reg result_src, coord, lod_info, projector, dx, dy;
2160 st_dst_reg result_dst, coord_dst;
2161 glsl_to_tgsi_instruction *inst = NULL;
2162 unsigned opcode = TGSI_OPCODE_NOP;
2163
2164 ir->coordinate->accept(this);
2165
2166 /* Put our coords in a temp. We'll need to modify them for shadow,
2167 * projection, or LOD, so the only case we'd use it as-is is if
2168 * we're doing plain old texturing. Mesa IR optimization should
2169 * handle cleaning up our mess in that case.
2170 */
2171 coord = get_temp(glsl_type::vec4_type);
2172 coord_dst = st_dst_reg(coord);
2173 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2174
2175 if (ir->projector) {
2176 ir->projector->accept(this);
2177 projector = this->result;
2178 }
2179
2180 /* Storage for our result. Ideally for an assignment we'd be using
2181 * the actual storage for the result here, instead.
2182 */
2183 result_src = get_temp(glsl_type::vec4_type);
2184 result_dst = st_dst_reg(result_src);
2185
2186 switch (ir->op) {
2187 case ir_tex:
2188 opcode = TGSI_OPCODE_TEX;
2189 break;
2190 case ir_txb:
2191 opcode = TGSI_OPCODE_TXB;
2192 ir->lod_info.bias->accept(this);
2193 lod_info = this->result;
2194 break;
2195 case ir_txl:
2196 opcode = TGSI_OPCODE_TXL;
2197 ir->lod_info.lod->accept(this);
2198 lod_info = this->result;
2199 break;
2200 case ir_txd:
2201 opcode = TGSI_OPCODE_TXD;
2202 ir->lod_info.grad.dPdx->accept(this);
2203 dx = this->result;
2204 ir->lod_info.grad.dPdy->accept(this);
2205 dy = this->result;
2206 break;
2207 case ir_txf: /* TODO: use TGSI_OPCODE_TXF here */
2208 assert(!"GLSL 1.30 features unsupported");
2209 break;
2210 }
2211
2212 if (ir->projector) {
2213 if (opcode == TGSI_OPCODE_TEX) {
2214 /* Slot the projector in as the last component of the coord. */
2215 coord_dst.writemask = WRITEMASK_W;
2216 emit(ir, TGSI_OPCODE_MOV, coord_dst, projector);
2217 coord_dst.writemask = WRITEMASK_XYZW;
2218 opcode = TGSI_OPCODE_TXP;
2219 } else {
2220 st_src_reg coord_w = coord;
2221 coord_w.swizzle = SWIZZLE_WWWW;
2222
2223 /* For the other TEX opcodes there's no projective version
2224 * since the last slot is taken up by LOD info. Do the
2225 * projective divide now.
2226 */
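/* Roughly, for a projective lookup with an explicit LOD this path ends
 * up emitting something like the following (register names invented for
 * the example):
 *
 *    RCP TEMP[c].w, TEMP[q].xxxx;            (1 / projector)
 *    MUL TEMP[c].xyz, TEMP[c], TEMP[c].wwww; (project x, y, z by hand)
 *    MOV TEMP[c].w, TEMP[lod].xxxx;          (LOD slotted in below)
 *    TXL TEMP[r], TEMP[c], texture[0], 2D;
 */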
2227 coord_dst.writemask = WRITEMASK_W;
2228 emit(ir, TGSI_OPCODE_RCP, coord_dst, projector);
2229
2230 /* In the case where we have to project the coordinates "by hand,"
2231 * the shadow comparator value must also be projected.
2232 */
2233 st_src_reg tmp_src = coord;
2234 if (ir->shadow_comparitor) {
2235 /* Slot the shadow value in as the second to last component of the
2236 * coord.
2237 */
2238 ir->shadow_comparitor->accept(this);
2239
2240 tmp_src = get_temp(glsl_type::vec4_type);
2241 st_dst_reg tmp_dst = st_dst_reg(tmp_src);
2242
2243 tmp_dst.writemask = WRITEMASK_Z;
2244 emit(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);
2245
2246 tmp_dst.writemask = WRITEMASK_XY;
2247 emit(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
2248 }
2249
2250 coord_dst.writemask = WRITEMASK_XYZ;
2251 emit(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);
2252
2253 coord_dst.writemask = WRITEMASK_XYZW;
2254 coord.swizzle = SWIZZLE_XYZW;
2255 }
2256 }
2257
2258 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the shadow
2259 * comparator was already put in the correct place (and projected) by the
2260 * by-hand projection code above.
2261 */
2262 if (ir->shadow_comparitor && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
2263 /* Slot the shadow value in as the second to last component of the
2264 * coord.
2265 */
2266 ir->shadow_comparitor->accept(this);
2267 coord_dst.writemask = WRITEMASK_Z;
2268 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2269 coord_dst.writemask = WRITEMASK_XYZW;
2270 }
2271
2272 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB) {
2273 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
2274 coord_dst.writemask = WRITEMASK_W;
2275 emit(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
2276 coord_dst.writemask = WRITEMASK_XYZW;
2277 }
2278
2279 if (opcode == TGSI_OPCODE_TXD)
2280 inst = emit(ir, opcode, result_dst, coord, dx, dy);
2281 else
2282 inst = emit(ir, opcode, result_dst, coord);
2283
2284 if (ir->shadow_comparitor)
2285 inst->tex_shadow = GL_TRUE;
2286
2287 inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler,
2288 this->shader_program,
2289 this->prog);
2290
2291 const glsl_type *sampler_type = ir->sampler->type;
2292
2293 switch (sampler_type->sampler_dimensionality) {
2294 case GLSL_SAMPLER_DIM_1D:
2295 inst->tex_target = (sampler_type->sampler_array)
2296 ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
2297 break;
2298 case GLSL_SAMPLER_DIM_2D:
2299 inst->tex_target = (sampler_type->sampler_array)
2300 ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
2301 break;
2302 case GLSL_SAMPLER_DIM_3D:
2303 inst->tex_target = TEXTURE_3D_INDEX;
2304 break;
2305 case GLSL_SAMPLER_DIM_CUBE:
2306 inst->tex_target = TEXTURE_CUBE_INDEX;
2307 break;
2308 case GLSL_SAMPLER_DIM_RECT:
2309 inst->tex_target = TEXTURE_RECT_INDEX;
2310 break;
2311 case GLSL_SAMPLER_DIM_BUF:
2312 assert(!"FINISHME: Implement ARB_texture_buffer_object");
2313 break;
2314 default:
2315 assert(!"Should not get here.");
2316 }
2317
2318 this->result = result_src;
2319 }
2320
2321 void
2322 glsl_to_tgsi_visitor::visit(ir_return *ir)
2323 {
2324 if (ir->get_value()) {
2325 st_dst_reg l;
2326 int i;
2327
2328 assert(current_function);
2329
2330 ir->get_value()->accept(this);
2331 st_src_reg r = this->result;
2332
2333 l = st_dst_reg(current_function->return_reg);
2334
2335 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
2336 emit(ir, TGSI_OPCODE_MOV, l, r);
2337 l.index++;
2338 r.index++;
2339 }
2340 }
2341
2342 emit(ir, TGSI_OPCODE_RET);
2343 }
2344
2345 void
2346 glsl_to_tgsi_visitor::visit(ir_discard *ir)
2347 {
2348 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
2349
2350 if (ir->condition) {
2351 ir->condition->accept(this);
2352 this->result.negate = ~this->result.negate;
2353 emit(ir, TGSI_OPCODE_KIL, undef_dst, this->result);
2354 } else {
2355 emit(ir, TGSI_OPCODE_KILP);
2356 }
2357
2358 fp->UsesKill = GL_TRUE;
2359 }
2360
2361 void
2362 glsl_to_tgsi_visitor::visit(ir_if *ir)
2363 {
2364 glsl_to_tgsi_instruction *cond_inst, *if_inst, *else_inst = NULL;
2365 glsl_to_tgsi_instruction *prev_inst;
2366
2367 prev_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2368
2369 ir->condition->accept(this);
2370 assert(this->result.file != PROGRAM_UNDEFINED);
2371
2372 if (this->options->EmitCondCodes) {
2373 cond_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2374
2375 /* See if we actually generated any instruction for generating
2376 * the condition. If not, then cook up a move to a temp so we
2377 * have something to set cond_update on.
2378 */
2379 if (cond_inst == prev_inst) {
2380 st_src_reg temp = get_temp(glsl_type::bool_type);
2381 cond_inst = emit(ir->condition, TGSI_OPCODE_MOV, st_dst_reg(temp), result);
2382 }
2383 cond_inst->cond_update = GL_TRUE;
2384
2385 if_inst = emit(ir->condition, TGSI_OPCODE_IF);
2386 if_inst->dst.cond_mask = COND_NE;
2387 } else {
2388 if_inst = emit(ir->condition, TGSI_OPCODE_IF, undef_dst, this->result);
2389 }
2390
2391 this->instructions.push_tail(if_inst);
2392
2393 visit_exec_list(&ir->then_instructions, this);
2394
2395 if (!ir->else_instructions.is_empty()) {
2396 else_inst = emit(ir->condition, TGSI_OPCODE_ELSE);
2397 visit_exec_list(&ir->else_instructions, this);
2398 }
2399
2400 if_inst = emit(ir->condition, TGSI_OPCODE_ENDIF);
2401 }
2402
2403 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
2404 {
2405 result.file = PROGRAM_UNDEFINED;
2406 next_temp = 1;
2407 next_signature_id = 1;
2408 current_function = NULL;
2409 num_address_regs = 0;
2410 indirect_addr_temps = false;
2411 indirect_addr_consts = false;
2412 mem_ctx = ralloc_context(NULL);
2413 }
2414
2415 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
2416 {
2417 ralloc_free(mem_ctx);
2418 }
2419
2420 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v)
2421 {
2422 delete v;
2423 }
2424
2425
2426 /**
2427 * Count resources used by the given gpu program (number of texture
2428 * samplers, etc).
2429 */
2430 static void
2431 count_resources(glsl_to_tgsi_visitor *v, gl_program *prog)
2432 {
2433 v->samplers_used = 0;
2434
2435 foreach_iter(exec_list_iterator, iter, v->instructions) {
2436 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2437
2438 if (is_tex_instruction(inst->op)) {
2439 v->samplers_used |= 1 << inst->sampler;
2440
2441 prog->SamplerTargets[inst->sampler] =
2442 (gl_texture_index)inst->tex_target;
2443 if (inst->tex_shadow) {
2444 prog->ShadowSamplers |= 1 << inst->sampler;
2445 }
2446 }
2447 }
2448
2449 prog->SamplersUsed = v->samplers_used;
2450 _mesa_update_shader_textures_used(prog);
2451 }
2452
2453
2454 /**
2455 * Check if the given vertex/fragment/shader program is within the
2456 * resource limits of the context (number of texture units, etc).
2457 * If any of those checks fail, record a linker error.
2458 *
2459 * XXX more checks are needed...
2460 */
2461 static void
2462 check_resources(const struct gl_context *ctx,
2463 struct gl_shader_program *shader_program,
2464 glsl_to_tgsi_visitor *prog,
2465 struct gl_program *proginfo)
2466 {
2467 switch (proginfo->Target) {
2468 case GL_VERTEX_PROGRAM_ARB:
2469 if (_mesa_bitcount(prog->samplers_used) >
2470 ctx->Const.MaxVertexTextureImageUnits) {
2471 fail_link(shader_program, "Too many vertex shader texture samplers");
2472 }
2473 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2474 fail_link(shader_program, "Too many vertex shader constants");
2475 }
2476 break;
2477 case MESA_GEOMETRY_PROGRAM:
2478 if (_mesa_bitcount(prog->samplers_used) >
2479 ctx->Const.MaxGeometryTextureImageUnits) {
2480 fail_link(shader_program, "Too many geometry shader texture samplers");
2481 }
2482 if (proginfo->Parameters->NumParameters >
2483 MAX_GEOMETRY_UNIFORM_COMPONENTS / 4) {
2484 fail_link(shader_program, "Too many geometry shader constants");
2485 }
2486 break;
2487 case GL_FRAGMENT_PROGRAM_ARB:
2488 if (_mesa_bitcount(prog->samplers_used) >
2489 ctx->Const.MaxTextureImageUnits) {
2490 fail_link(shader_program, "Too many fragment shader texture samplers");
2491 }
2492 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2493 fail_link(shader_program, "Too many fragment shader constants");
2494 }
2495 break;
2496 default:
2497 _mesa_problem(ctx, "unexpected program type in check_resources()");
2498 }
2499 }
2500
2501
2502
2503 struct uniform_sort {
2504 struct gl_uniform *u;
2505 int pos;
2506 };
2507
2508 /* The shader_program->Uniforms list is almost sorted in increasing
2509 * uniform->{Frag,Vert}Pos locations, but not quite when there are
2510 * uniforms shared between targets. We need to add parameters in
2511 * increasing order for the targets.
2512 */
2513 static int
2514 sort_uniforms(const void *a, const void *b)
2515 {
2516 struct uniform_sort *u1 = (struct uniform_sort *)a;
2517 struct uniform_sort *u2 = (struct uniform_sort *)b;
2518
2519 return u1->pos - u2->pos;
2520 }
2521
2522 /* Add the uniforms to the parameters. The linker chose locations
2523 * in our parameters lists (which weren't created yet), which the
2524 * uniforms code will use to poke values into our parameters list
2525 * when uniforms are updated.
2526 */
2527 static void
2528 add_uniforms_to_parameters_list(struct gl_shader_program *shader_program,
2529 struct gl_shader *shader,
2530 struct gl_program *prog)
2531 {
2532 unsigned int i;
2533 unsigned int next_sampler = 0, num_uniforms = 0;
2534 struct uniform_sort *sorted_uniforms;
2535
2536 sorted_uniforms = ralloc_array(NULL, struct uniform_sort,
2537 shader_program->Uniforms->NumUniforms);
2538
2539 for (i = 0; i < shader_program->Uniforms->NumUniforms; i++) {
2540 struct gl_uniform *uniform = shader_program->Uniforms->Uniforms + i;
2541 int parameter_index = -1;
2542
2543 switch (shader->Type) {
2544 case GL_VERTEX_SHADER:
2545 parameter_index = uniform->VertPos;
2546 break;
2547 case GL_FRAGMENT_SHADER:
2548 parameter_index = uniform->FragPos;
2549 break;
2550 case GL_GEOMETRY_SHADER:
2551 parameter_index = uniform->GeomPos;
2552 break;
2553 }
2554
2555 /* Only add uniforms used in our target. */
2556 if (parameter_index != -1) {
2557 sorted_uniforms[num_uniforms].pos = parameter_index;
2558 sorted_uniforms[num_uniforms].u = uniform;
2559 num_uniforms++;
2560 }
2561 }
2562
2563 qsort(sorted_uniforms, num_uniforms, sizeof(struct uniform_sort),
2564 sort_uniforms);
2565
2566 for (i = 0; i < num_uniforms; i++) {
2567 struct gl_uniform *uniform = sorted_uniforms[i].u;
2568 int parameter_index = sorted_uniforms[i].pos;
2569 const glsl_type *type = uniform->Type;
2570 unsigned int size;
2571
2572 if (type->is_vector() ||
2573 type->is_scalar()) {
2574 size = type->vector_elements;
2575 } else {
2576 size = type_size(type) * 4;
2577 }
2578
2579 gl_register_file file;
2580 if (type->is_sampler() ||
2581 (type->is_array() && type->fields.array->is_sampler())) {
2582 file = PROGRAM_SAMPLER;
2583 } else {
2584 file = PROGRAM_UNIFORM;
2585 }
2586
2587 GLint index = _mesa_lookup_parameter_index(prog->Parameters, -1,
2588 uniform->Name);
2589
2590 if (index < 0) {
2591 index = _mesa_add_parameter(prog->Parameters, file,
2592 uniform->Name, size, type->gl_type,
2593 NULL, NULL, 0x0);
2594
2595 /* Sampler uniform values are stored in prog->SamplerUnits,
2596 * and the entry in that array is selected by this index we
2597 * store in ParameterValues[].
2598 */
2599 if (file == PROGRAM_SAMPLER) {
2600 for (unsigned int j = 0; j < size / 4; j++)
2601 prog->Parameters->ParameterValues[index + j][0].f = next_sampler++;
2602 }
2603
2604 /* The location chosen in the Parameters list here (returned
2605 * from _mesa_add_parameter above) has to match what the linker chose.
2606 */
2607 if (index != parameter_index) {
2608 fail_link(shader_program, "Allocation of uniform `%s' to target "
2609 "failed (%d vs %d)\n",
2610 uniform->Name, index, parameter_index);
2611 }
2612 }
2613 }
2614
2615 ralloc_free(sorted_uniforms);
2616 }
2617
2618 static void
2619 set_uniform_initializer(struct gl_context *ctx, void *mem_ctx,
2620 struct gl_shader_program *shader_program,
2621 const char *name, const glsl_type *type,
2622 ir_constant *val)
2623 {
2624 if (type->is_record()) {
2625 ir_constant *field_constant;
2626
2627 field_constant = (ir_constant *)val->components.get_head();
2628
2629 for (unsigned int i = 0; i < type->length; i++) {
2630 const glsl_type *field_type = type->fields.structure[i].type;
2631 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
2632 type->fields.structure[i].name);
2633 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name,
2634 field_type, field_constant);
2635 field_constant = (ir_constant *)field_constant->next;
2636 }
2637 return;
2638 }
2639
2640 int loc = _mesa_get_uniform_location(ctx, shader_program, name);
2641
2642 if (loc == -1) {
2643 fail_link(shader_program,
2644 "Couldn't find uniform for initializer %s\n", name);
2645 return;
2646 }
2647
2648 for (unsigned int i = 0; i < (type->is_array() ? type->length : 1); i++) {
2649 ir_constant *element;
2650 const glsl_type *element_type;
2651 if (type->is_array()) {
2652 element = val->array_elements[i];
2653 element_type = type->fields.array;
2654 } else {
2655 element = val;
2656 element_type = type;
2657 }
2658
2659 void *values;
2660
2661 if (element_type->base_type == GLSL_TYPE_BOOL) {
2662 int *conv = ralloc_array(mem_ctx, int, element_type->components());
2663 for (unsigned int j = 0; j < element_type->components(); j++) {
2664 conv[j] = element->value.b[j];
2665 }
2666 values = (void *)conv;
2667 element_type = glsl_type::get_instance(GLSL_TYPE_INT,
2668 element_type->vector_elements,
2669 1);
2670 } else {
2671 values = &element->value;
2672 }
2673
2674 if (element_type->is_matrix()) {
2675 _mesa_uniform_matrix(ctx, shader_program,
2676 element_type->matrix_columns,
2677 element_type->vector_elements,
2678 loc, 1, GL_FALSE, (GLfloat *)values);
2679 loc += element_type->matrix_columns;
2680 } else {
2681 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns,
2682 values, element_type->gl_type);
2683 loc += type_size(element_type);
2684 }
2685 }
2686 }
2687
2688 static void
2689 set_uniform_initializers(struct gl_context *ctx,
2690 struct gl_shader_program *shader_program)
2691 {
2692 void *mem_ctx = NULL;
2693
2694 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2695 struct gl_shader *shader = shader_program->_LinkedShaders[i];
2696
2697 if (shader == NULL)
2698 continue;
2699
2700 foreach_iter(exec_list_iterator, iter, *shader->ir) {
2701 ir_instruction *ir = (ir_instruction *)iter.get();
2702 ir_variable *var = ir->as_variable();
2703
2704 if (!var || var->mode != ir_var_uniform || !var->constant_value)
2705 continue;
2706
2707 if (!mem_ctx)
2708 mem_ctx = ralloc_context(NULL);
2709
2710 set_uniform_initializer(ctx, mem_ctx, shader_program, var->name,
2711 var->type, var->constant_value);
2712 }
2713 }
2714
2715 ralloc_free(mem_ctx);
2716 }
2717
2718 /*
2719 * Scan/rewrite program to remove reads of custom (output) registers.
2720 * The passed type has to be either PROGRAM_OUTPUT or PROGRAM_VARYING
2721 * (for vertex shaders).
2722 * In GLSL shaders, varying vars can be read and written.
2723 * On some hardware, trying to read an output register causes trouble.
2724 * So, rewrite the program to use a temporary register in this case.
2725 *
2726 * Based on _mesa_remove_output_reads from programopt.c.
2727 */
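/* Sketch of the rewrite (indices invented for the example):
 *
 *    before:  MOV OUTPUT[0], INPUT[3];
 *             MUL OUTPUT[1], OUTPUT[0], CONST[2];
 *
 *    after:   MOV TEMP[7], INPUT[3];
 *             MUL OUTPUT[1], TEMP[7], CONST[2];
 *             ...
 *             MOV OUTPUT[0], TEMP[7];   (added at the end)
 */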
2728 void
2729 glsl_to_tgsi_visitor::remove_output_reads(gl_register_file type)
2730 {
2731 GLuint i;
2732 GLint outputMap[VERT_RESULT_MAX];
2733 GLint outputTypes[VERT_RESULT_MAX];
2734 GLuint numVaryingReads = 0;
2735 GLboolean usedTemps[MAX_PROGRAM_TEMPS];
2736 GLuint firstTemp = 0;
2737
2738 _mesa_find_used_registers(prog, PROGRAM_TEMPORARY,
2739 usedTemps, MAX_PROGRAM_TEMPS);
2740
2741 assert(type == PROGRAM_VARYING || type == PROGRAM_OUTPUT);
2742 assert(prog->Target == GL_VERTEX_PROGRAM_ARB || type != PROGRAM_VARYING);
2743
2744 for (i = 0; i < VERT_RESULT_MAX; i++)
2745 outputMap[i] = -1;
2746
2747 /* look for instructions which read from varying vars */
2748 foreach_iter(exec_list_iterator, iter, this->instructions) {
2749 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2750 const GLuint numSrc = num_inst_src_regs(inst->op);
2751 GLuint j;
2752 for (j = 0; j < numSrc; j++) {
2753 if (inst->src[j].file == type) {
2754 /* replace the read with a temp reg */
2755 const GLuint var = inst->src[j].index;
2756 if (outputMap[var] == -1) {
2757 numVaryingReads++;
2758 outputMap[var] = _mesa_find_free_register(usedTemps,
2759 MAX_PROGRAM_TEMPS,
2760 firstTemp);
2761 outputTypes[var] = inst->src[j].type;
2762 firstTemp = outputMap[var] + 1;
2763 }
2764 inst->src[j].file = PROGRAM_TEMPORARY;
2765 inst->src[j].index = outputMap[var];
2766 }
2767 }
2768 }
2769
2770 if (numVaryingReads == 0)
2771 return; /* nothing to be done */
2772
2773 /* look for instructions which write to the varying vars identified above */
2774 foreach_iter(exec_list_iterator, iter, this->instructions) {
2775 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2776 if (inst->dst.file == type && outputMap[inst->dst.index] >= 0) {
2777 /* change inst to write to the temp reg, instead of the varying */
2778 inst->dst.file = PROGRAM_TEMPORARY;
2779 inst->dst.index = outputMap[inst->dst.index];
2780 }
2781 }
2782
2783 /* insert new MOV instructions at the end */
2784 for (i = 0; i < VERT_RESULT_MAX; i++) {
2785 if (outputMap[i] >= 0) {
2786 /* MOV VAR[i], TEMP[tmp]; */
2787 st_src_reg src = st_src_reg(PROGRAM_TEMPORARY, outputMap[i], outputTypes[i]);
2788 st_dst_reg dst = st_dst_reg(type, WRITEMASK_XYZW, outputTypes[i]);
2789 dst.index = i;
2790 this->emit(NULL, TGSI_OPCODE_MOV, dst, src);
2791 }
2792 }
2793 }
2794
2795 /**
2796 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
2797 * are read from the given src in this instruction
2798 */
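/* Worked example: with dst.writemask = XY and src.swizzle = .wzyx, only
 * the first two swizzle components matter; they select W and Z of the
 * source, so the function returns WRITEMASK_Z | WRITEMASK_W.
 */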
2799 static int
2800 get_src_arg_mask(st_dst_reg dst, st_src_reg src)
2801 {
2802 int read_mask = 0, comp;
2803
2804 /* Now, given the src swizzle and the written channels, find which
2805 * components are actually read
2806 */
2807 for (comp = 0; comp < 4; ++comp) {
2808 const unsigned coord = GET_SWZ(src.swizzle, comp);
2809 ASSERT(coord < 4);
2810 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
2811 read_mask |= 1 << coord;
2812 }
2813
2814 return read_mask;
2815 }
2816
2817 /**
2818 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
2819 * instruction is the first instruction to write to register T0. There are
2820 * several lowering passes done in GLSL IR (e.g. branches and
2821 * relative addressing) that create a large number of conditional assignments
2822 * that glsl_to_tgsi converts to CMP instructions like the one mentioned above.
2823 *
2824 * Here is why this conversion is safe:
2825 * CMP T0, T1 T2 T0 can be expanded to:
2826 * if (T1 < 0.0)
2827 * MOV T0, T2;
2828 * else
2829 * MOV T0, T0;
2830 *
2831 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
2832 * as the original program. If (T1 < 0.0) evaluates to false, executing
2833 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
2834 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
2835 * because any instruction that was going to read from T0 after this was going
2836 * to read a garbage value anyway.
2837 */
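/* In TGSI terms the rewrite is simply (assuming TEMP[0] has not been
 * written before this point):
 *
 *    CMP TEMP[0], TEMP[1], TEMP[2], TEMP[0];   ->   MOV TEMP[0], TEMP[2];
 */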
2838 void
2839 glsl_to_tgsi_visitor::simplify_cmp(void)
2840 {
2841 unsigned tempWrites[MAX_PROGRAM_TEMPS];
2842 unsigned outputWrites[MAX_PROGRAM_OUTPUTS];
2843
2844 memset(tempWrites, 0, sizeof(tempWrites));
2845 memset(outputWrites, 0, sizeof(outputWrites));
2846
2847 foreach_iter(exec_list_iterator, iter, this->instructions) {
2848 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2849 unsigned prevWriteMask = 0;
2850
2851 /* Give up if we encounter relative addressing or flow control. */
2852 if (inst->dst.reladdr ||
2853 tgsi_get_opcode_info(inst->op)->is_branch ||
2854 inst->op == TGSI_OPCODE_BGNSUB ||
2855 inst->op == TGSI_OPCODE_CONT ||
2856 inst->op == TGSI_OPCODE_END ||
2857 inst->op == TGSI_OPCODE_ENDSUB ||
2858 inst->op == TGSI_OPCODE_RET) {
2859 return;
2860 }
2861
2862 if (inst->dst.file == PROGRAM_OUTPUT) {
2863 assert(inst->dst.index < MAX_PROGRAM_OUTPUTS);
2864 prevWriteMask = outputWrites[inst->dst.index];
2865 outputWrites[inst->dst.index] |= inst->dst.writemask;
2866 } else if (inst->dst.file == PROGRAM_TEMPORARY) {
2867 assert(inst->dst.index < MAX_PROGRAM_TEMPS);
2868 prevWriteMask = tempWrites[inst->dst.index];
2869 tempWrites[inst->dst.index] |= inst->dst.writemask;
2870 }
2871
2872 /* For a CMP to be considered a conditional write, the destination
2873 * register and source register two must be the same. */
2874 if (inst->op == TGSI_OPCODE_CMP
2875 && !(inst->dst.writemask & prevWriteMask)
2876 && inst->src[2].file == inst->dst.file
2877 && inst->src[2].index == inst->dst.index
2878 && inst->dst.writemask == get_src_arg_mask(inst->dst, inst->src[2])) {
2879
2880 inst->op = TGSI_OPCODE_MOV;
2881 inst->src[0] = inst->src[1];
2882 }
2883 }
2884 }
2885
2886 /* Replaces all references to a temporary register index with another index. */
2887 void
2888 glsl_to_tgsi_visitor::rename_temp_register(int index, int new_index)
2889 {
2890 foreach_iter(exec_list_iterator, iter, this->instructions) {
2891 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2892 unsigned j;
2893
2894 for (j=0; j < num_inst_src_regs(inst->op); j++) {
2895 if (inst->src[j].file == PROGRAM_TEMPORARY &&
2896 inst->src[j].index == index) {
2897 inst->src[j].index = new_index;
2898 }
2899 }
2900
2901 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
2902 inst->dst.index = new_index;
2903 }
2904 }
2905 }
2906
2907 int
2908 glsl_to_tgsi_visitor::get_first_temp_read(int index)
2909 {
2910 int depth = 0; /* loop depth */
2911 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
2912 unsigned i = 0, j;
2913
2914 foreach_iter(exec_list_iterator, iter, this->instructions) {
2915 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2916
2917 for (j=0; j < num_inst_src_regs(inst->op); j++) {
2918 if (inst->src[j].file == PROGRAM_TEMPORARY &&
2919 inst->src[j].index == index) {
2920 return (depth == 0) ? i : loop_start;
2921 }
2922 }
2923
2924 if (inst->op == TGSI_OPCODE_BGNLOOP) {
2925 if(depth++ == 0)
2926 loop_start = i;
2927 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
2928 if (--depth == 0)
2929 loop_start = -1;
2930 }
2931 assert(depth >= 0);
2932
2933 i++;
2934 }
2935
2936 return -1;
2937 }
2938
2939 int
2940 glsl_to_tgsi_visitor::get_first_temp_write(int index)
2941 {
2942 int depth = 0; /* loop depth */
2943 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
2944 int i = 0;
2945
2946 foreach_iter(exec_list_iterator, iter, this->instructions) {
2947 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2948
2949 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
2950 return (depth == 0) ? i : loop_start;
2951 }
2952
2953 if (inst->op == TGSI_OPCODE_BGNLOOP) {
2954 if(depth++ == 0)
2955 loop_start = i;
2956 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
2957 if (--depth == 0)
2958 loop_start = -1;
2959 }
2960 assert(depth >= 0);
2961
2962 i++;
2963 }
2964
2965 return -1;
2966 }
2967
2968 int
2969 glsl_to_tgsi_visitor::get_last_temp_read(int index)
2970 {
2971 int depth = 0; /* loop depth */
2972 int last = -1; /* index of last instruction that reads the temporary */
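/* A read that happens inside a loop is recorded as -2 and then promoted
 * to the index of the outermost enclosing ENDLOOP below, since the value
 * may be read again on a later iteration.
 */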
2973 unsigned i = 0, j;
2974
2975 foreach_iter(exec_list_iterator, iter, this->instructions) {
2976 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2977
2978 for (j=0; j < num_inst_src_regs(inst->op); j++) {
2979 if (inst->src[j].file == PROGRAM_TEMPORARY &&
2980 inst->src[j].index == index) {
2981 last = (depth == 0) ? i : -2;
2982 }
2983 }
2984
2985 if (inst->op == TGSI_OPCODE_BGNLOOP)
2986 depth++;
2987 else if (inst->op == TGSI_OPCODE_ENDLOOP)
2988 if (--depth == 0 && last == -2)
2989 last = i;
2990 assert(depth >= 0);
2991
2992 i++;
2993 }
2994
2995 assert(last >= -1);
2996 return last;
2997 }
2998
2999 int
3000 glsl_to_tgsi_visitor::get_last_temp_write(int index)
3001 {
3002 int depth = 0; /* loop depth */
3003 int last = -1; /* index of last instruction that writes to the temporary */
3004 int i = 0;
3005
3006 foreach_iter(exec_list_iterator, iter, this->instructions) {
3007 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3008
3009 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index)
3010 last = (depth == 0) ? i : -2;
3011
3012 if (inst->op == TGSI_OPCODE_BGNLOOP)
3013 depth++;
3014 else if (inst->op == TGSI_OPCODE_ENDLOOP)
3015 if (--depth == 0 && last == -2)
3016 last = i;
3017 assert(depth >= 0);
3018
3019 i++;
3020 }
3021
3022 assert(last >= -1);
3023 return last;
3024 }
3025
3026 /*
3027 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
3028 * channels for copy propagation and updates following instructions to
3029 * use the original versions.
3030 *
3031 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3032 * will occur. As an example, a TXP production before this pass:
3033 *
3034 * 0: MOV TEMP[1], INPUT[4].xyyy;
3035 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3036 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
3037 *
3038 * and after:
3039 *
3040 * 0: MOV TEMP[1], INPUT[4].xyyy;
3041 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3042 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3043 *
3044 * which allows for dead code elimination on TEMP[1]'s writes.
3045 */
3046 void
3047 glsl_to_tgsi_visitor::copy_propagate(void)
3048 {
3049 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx,
3050 glsl_to_tgsi_instruction *,
3051 this->next_temp * 4);
3052 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
3053 int level = 0;
3054
3055 foreach_iter(exec_list_iterator, iter, this->instructions) {
3056 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3057
3058 assert(inst->dst.file != PROGRAM_TEMPORARY
3059 || inst->dst.index < this->next_temp);
3060
3061 /* First, do any copy propagation possible into the src regs. */
3062 for (int r = 0; r < 3; r++) {
3063 glsl_to_tgsi_instruction *first = NULL;
3064 bool good = true;
3065 int acp_base = inst->src[r].index * 4;
3066
3067 if (inst->src[r].file != PROGRAM_TEMPORARY ||
3068 inst->src[r].reladdr)
3069 continue;
3070
3071 /* See if we can find entries in the ACP consisting of MOVs
3072 * from the same src register for all the swizzled channels
3073 * of this src register reference.
3074 */
3075 for (int i = 0; i < 4; i++) {
3076 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3077 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan];
3078
3079 if (!copy_chan) {
3080 good = false;
3081 break;
3082 }
3083
3084 assert(acp_level[acp_base + src_chan] <= level);
3085
3086 if (!first) {
3087 first = copy_chan;
3088 } else {
3089 if (first->src[0].file != copy_chan->src[0].file ||
3090 first->src[0].index != copy_chan->src[0].index) {
3091 good = false;
3092 break;
3093 }
3094 }
3095 }
3096
3097 if (good) {
3098 /* We've now validated that we can copy-propagate to
3099 * replace this src register reference. Do it.
3100 */
3101 inst->src[r].file = first->src[0].file;
3102 inst->src[r].index = first->src[0].index;
3103
3104 int swizzle = 0;
3105 for (int i = 0; i < 4; i++) {
3106 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3107 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan];
3108 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) <<
3109 (3 * i));
3110 }
3111 inst->src[r].swizzle = swizzle;
3112 }
3113 }
3114
3115 switch (inst->op) {
3116 case TGSI_OPCODE_BGNLOOP:
3117 case TGSI_OPCODE_ENDLOOP:
3118 /* End of a basic block, clear the ACP entirely. */
3119 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3120 break;
3121
3122 case TGSI_OPCODE_IF:
3123 ++level;
3124 break;
3125
3126 case TGSI_OPCODE_ENDIF:
3127 case TGSI_OPCODE_ELSE:
3128 /* Clear all channels written inside the block from the ACP, but
3129 * leave those that were not touched.
3130 */
3131 for (int r = 0; r < this->next_temp; r++) {
3132 for (int c = 0; c < 4; c++) {
3133 if (!acp[4 * r + c])
3134 continue;
3135
3136 if (acp_level[4 * r + c] >= level)
3137 acp[4 * r + c] = NULL;
3138 }
3139 }
3140 if (inst->op == TGSI_OPCODE_ENDIF)
3141 --level;
3142 break;
3143
3144 default:
3145 /* Continuing the block, clear any written channels from
3146 * the ACP.
3147 */
3148 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) {
3149 /* Any temporary might be written, so no copy propagation
3150 * across this instruction.
3151 */
3152 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3153 } else if (inst->dst.file == PROGRAM_OUTPUT &&
3154 inst->dst.reladdr) {
3155 /* Any output might be written, so no copy propagation
3156 * from outputs across this instruction.
3157 */
3158 for (int r = 0; r < this->next_temp; r++) {
3159 for (int c = 0; c < 4; c++) {
3160 if (!acp[4 * r + c])
3161 continue;
3162
3163 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
3164 acp[4 * r + c] = NULL;
3165 }
3166 }
3167 } else if (inst->dst.file == PROGRAM_TEMPORARY ||
3168 inst->dst.file == PROGRAM_OUTPUT) {
3169 /* Clear where it's used as dst. */
3170 if (inst->dst.file == PROGRAM_TEMPORARY) {
3171 for (int c = 0; c < 4; c++) {
3172 if (inst->dst.writemask & (1 << c)) {
3173 acp[4 * inst->dst.index + c] = NULL;
3174 }
3175 }
3176 }
3177
3178 /* Clear where it's used as src. */
3179 for (int r = 0; r < this->next_temp; r++) {
3180 for (int c = 0; c < 4; c++) {
3181 if (!acp[4 * r + c])
3182 continue;
3183
3184 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
3185
3186 if (acp[4 * r + c]->src[0].file == inst->dst.file &&
3187 acp[4 * r + c]->src[0].index == inst->dst.index &&
3188 inst->dst.writemask & (1 << src_chan))
3189 {
3190 acp[4 * r + c] = NULL;
3191 }
3192 }
3193 }
3194 }
3195 break;
3196 }
3197
3198 /* If this is a copy, add it to the ACP. */
3199 if (inst->op == TGSI_OPCODE_MOV &&
3200 inst->dst.file == PROGRAM_TEMPORARY &&
3201 !inst->dst.reladdr &&
3202 !inst->saturate &&
3203 !inst->src[0].reladdr &&
3204 !inst->src[0].negate) {
3205 for (int i = 0; i < 4; i++) {
3206 if (inst->dst.writemask & (1 << i)) {
3207 acp[4 * inst->dst.index + i] = inst;
3208 acp_level[4 * inst->dst.index + i] = level;
3209 }
3210 }
3211 }
3212 }
3213
3214 ralloc_free(acp_level);
3215 ralloc_free(acp);
3216 }
3217
3218 /*
3219 * Tracks available PROGRAM_TEMPORARY registers for dead code elimination.
3220 *
3221 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3222 * will occur. As an example, a TXP production after copy propagation but
3223 * before this pass:
3224 *
3225 * 0: MOV TEMP[1], INPUT[4].xyyy;
3226 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3227 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3228 *
3229 * and after this pass:
3230 *
3231 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3232 *
3233 * FIXME: assumes that all functions are inlined (no support for BGNSUB/ENDSUB)
3234 * FIXME: doesn't eliminate all dead code inside of loops; it steps around them
3235 */
3236 void
3237 glsl_to_tgsi_visitor::eliminate_dead_code(void)
3238 {
3239 int i;
3240
3241 for (i=0; i < this->next_temp; i++) {
3242 int last_read = get_last_temp_read(i);
3243 int j = 0;
3244
3245 foreach_iter(exec_list_iterator, iter, this->instructions) {
3246 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3247
3248 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == i &&
3249 j > last_read)
3250 {
3251 iter.remove();
3252 delete inst;
3253 }
3254
3255 j++;
3256 }
3257 }
3258 }
3259
3260 /* Merges temporary registers together where possible to reduce the number of
3261 * registers needed to run a program.
3262 *
3263 * Produces optimal code only after copy propagation and dead code elimination
3264 * have been run. */
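/* Sketch of the idea (indices invented for the example): if TEMP[2] is
 * last read at instruction 10 and TEMP[5] is first written at
 * instruction 12, their live ranges do not overlap, so every reference
 * to TEMP[5] can be renamed to TEMP[2] and one temporary disappears.
 */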
3265 void
3266 glsl_to_tgsi_visitor::merge_registers(void)
3267 {
3268 int *last_reads = rzalloc_array(mem_ctx, int, this->next_temp);
3269 int *first_writes = rzalloc_array(mem_ctx, int, this->next_temp);
3270 int i, j;
3271
3272 /* Read the indices of the last read and first write to each temp register
3273 * into an array so that we don't have to traverse the instruction list as
3274 * much. */
3275 for (i=0; i < this->next_temp; i++) {
3276 last_reads[i] = get_last_temp_read(i);
3277 first_writes[i] = get_first_temp_write(i);
3278 }
3279
3280 /* Start looking for registers with non-overlapping usages that can be
3281 * merged together. */
3282 for (i=0; i < this->next_temp; i++) {
3283 /* Don't touch unused registers. */
3284 if (last_reads[i] < 0 || first_writes[i] < 0) continue;
3285
3286 for (j=0; j < this->next_temp; j++) {
3287 /* Don't touch unused registers. */
3288 if (last_reads[j] < 0 || first_writes[j] < 0) continue;
3289
3290 /* We can merge the two registers if the first write to j is after or
3291 * in the same instruction as the last read from i. Note that the
3292 * register at index i will always be used earlier or at the same time
3293 * as the register at index j. */
3294 if (first_writes[i] <= first_writes[j] &&
3295 last_reads[i] <= first_writes[j])
3296 {
3297 rename_temp_register(j, i); /* Replace all references to j with i.*/
3298
3299 /* Update the first_writes and last_reads arrays with the new
3300 * values for the merged register index, and mark the newly unused
3301 * register index as such. */
3302 last_reads[i] = last_reads[j];
3303 first_writes[j] = -1;
3304 last_reads[j] = -1;
3305 }
3306 }
3307 }
3308
3309 ralloc_free(last_reads);
3310 ralloc_free(first_writes);
3311 }
3312
3313 /* Reassign indices to temporary registers by reusing unused indices created
3314 * by optimization passes. */
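/* For instance, if only TEMP[0], TEMP[3] and TEMP[7] are still read after
 * the passes above, they are renamed to TEMP[0], TEMP[1] and TEMP[2] and
 * this->next_temp drops to 3.
 */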
3315 void
3316 glsl_to_tgsi_visitor::renumber_registers(void)
3317 {
3318 int i = 0;
3319 int new_index = 0;
3320
3321 for (i=0; i < this->next_temp; i++) {
3322 if (get_first_temp_read(i) < 0) continue;
3323 if (i != new_index)
3324 rename_temp_register(i, new_index);
3325 new_index++;
3326 }
3327
3328 this->next_temp = new_index;
3329 }
3330
3331 /* ------------------------- TGSI conversion stuff -------------------------- */
3332 struct label {
3333 unsigned branch_target;
3334 unsigned token;
3335 };
3336
3337 /**
3338 * Intermediate state used during shader translation.
3339 */
3340 struct st_translate {
3341 struct ureg_program *ureg;
3342
3343 struct ureg_dst temps[MAX_PROGRAM_TEMPS];
3344 struct ureg_src *constants;
3345 struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
3346 struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
3347 struct ureg_dst address[1];
3348 struct ureg_src samplers[PIPE_MAX_SAMPLERS];
3349 struct ureg_src systemValues[SYSTEM_VALUE_MAX];
3350
3351 /* Extra info for handling point size clamping in vertex shader */
3352 struct ureg_dst pointSizeResult; /**< Actual point size output register */
3353 struct ureg_src pointSizeConst; /**< Point size range constant register */
3354 GLint pointSizeOutIndex; /**< Temp point size output register */
3355 GLboolean prevInstWrotePointSize;
3356
3357 const GLuint *inputMapping;
3358 const GLuint *outputMapping;
3359
3360 /* For every instruction that contains a label (eg CALL), keep
3361 * details so that we can go back afterwards and emit the correct
3362 * tgsi instruction number for each label.
3363 */
3364 struct label *labels;
3365 unsigned labels_size;
3366 unsigned labels_count;
3367
3368 /* Keep a record of the tgsi instruction number that each mesa
3369 * instruction starts at, will be used to fix up labels after
3370 * translation.
3371 */
3372 unsigned *insn;
3373 unsigned insn_size;
3374 unsigned insn_count;
3375
3376 unsigned procType; /**< TGSI_PROCESSOR_VERTEX/FRAGMENT */
3377
3378 boolean error;
3379 };
3380
3381 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
3382 static unsigned mesa_sysval_to_semantic[SYSTEM_VALUE_MAX] = {
3383 TGSI_SEMANTIC_FACE,
3384 TGSI_SEMANTIC_INSTANCEID
3385 };
3386
3387 /**
3388 * Make note of a branch to a label in the TGSI code.
3389 * After we've emitted all instructions, we'll go over the list
3390 * of labels built here and patch the TGSI code with the actual
3391 * location of each label.
3392 */
3393 static unsigned *get_label( struct st_translate *t,
3394 unsigned branch_target )
3395 {
3396 unsigned i;
3397
3398 if (t->labels_count + 1 >= t->labels_size) {
3399 t->labels_size = 1 << (util_logbase2(t->labels_size) + 1);
3400 t->labels = (struct label *)realloc(t->labels,
3401 t->labels_size * sizeof t->labels[0]);
3402 if (t->labels == NULL) {
3403 static unsigned dummy;
3404 t->error = TRUE;
3405 return &dummy;
3406 }
3407 }
3408
3409 i = t->labels_count++;
3410 t->labels[i].branch_target = branch_target;
3411 return &t->labels[i].token;
3412 }
3413
3414 /**
3415 * Called prior to emitting the TGSI code for each Mesa instruction.
3416 * Allocate additional space for instructions if needed.
3417 * Update the insn[] array so the next Mesa instruction points to
3418 * the next TGSI instruction.
3419 */
3420 static void set_insn_start( struct st_translate *t,
3421 unsigned start )
3422 {
3423 if (t->insn_count + 1 >= t->insn_size) {
3424 t->insn_size = 1 << (util_logbase2(t->insn_size) + 1);
3425 t->insn = (unsigned *)realloc(t->insn, t->insn_size * sizeof t->insn[0]);
3426 if (t->insn == NULL) {
3427 t->error = TRUE;
3428 return;
3429 }
3430 }
3431
3432 t->insn[t->insn_count++] = start;
3433 }
3434
3435 /**
3436 * Map a Mesa dst register to a TGSI ureg_dst register.
3437 */
3438 static struct ureg_dst
3439 dst_register( struct st_translate *t,
3440 gl_register_file file,
3441 GLuint index )
3442 {
3443 switch( file ) {
3444 case PROGRAM_UNDEFINED:
3445 return ureg_dst_undef();
3446
3447 case PROGRAM_TEMPORARY:
3448 if (ureg_dst_is_undef(t->temps[index]))
3449 t->temps[index] = ureg_DECL_temporary( t->ureg );
3450
3451 return t->temps[index];
3452
3453 case PROGRAM_OUTPUT:
3454 if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ)
3455 t->prevInstWrotePointSize = GL_TRUE;
3456
3457 if (t->procType == TGSI_PROCESSOR_VERTEX)
3458 assert(index < VERT_RESULT_MAX);
3459 else if (t->procType == TGSI_PROCESSOR_FRAGMENT)
3460 assert(index < FRAG_RESULT_MAX);
3461 else
3462 assert(index < GEOM_RESULT_MAX);
3463
3464 assert(t->outputMapping[index] < Elements(t->outputs));
3465
3466 return t->outputs[t->outputMapping[index]];
3467
3468 case PROGRAM_ADDRESS:
3469 return t->address[index];
3470
3471 default:
3472 debug_assert( 0 );
3473 return ureg_dst_undef();
3474 }
3475 }
3476
3477 /**
3478 * Map a Mesa src register to a TGSI ureg_src register.
3479 */
3480 static struct ureg_src
3481 src_register( struct st_translate *t,
3482 gl_register_file file,
3483 GLuint index )
3484 {
3485 switch( file ) {
3486 case PROGRAM_UNDEFINED:
3487 return ureg_src_undef();
3488
3489 case PROGRAM_TEMPORARY:
3490 assert(index >= 0);
3491 assert(index < Elements(t->temps));
3492 if (ureg_dst_is_undef(t->temps[index]))
3493 t->temps[index] = ureg_DECL_temporary( t->ureg );
3494 return ureg_src(t->temps[index]);
3495
3496 case PROGRAM_NAMED_PARAM:
3497 case PROGRAM_ENV_PARAM:
3498 case PROGRAM_LOCAL_PARAM:
3499 case PROGRAM_UNIFORM:
3500 assert(index >= 0);
3501 return t->constants[index];
3502 case PROGRAM_STATE_VAR:
3503 case PROGRAM_CONSTANT: /* ie, immediate */
3504 if (index < 0)
3505 return ureg_DECL_constant( t->ureg, 0 );
3506 else
3507 return t->constants[index];
3508
3509 case PROGRAM_INPUT:
3510 assert(t->inputMapping[index] < Elements(t->inputs));
3511 return t->inputs[t->inputMapping[index]];
3512
3513 case PROGRAM_OUTPUT:
3514 assert(t->outputMapping[index] < Elements(t->outputs));
3515 return ureg_src(t->outputs[t->outputMapping[index]]); /* not needed? */
3516
3517 case PROGRAM_ADDRESS:
3518 return ureg_src(t->address[index]);
3519
3520 case PROGRAM_SYSTEM_VALUE:
3521 assert(index < Elements(t->systemValues));
3522 return t->systemValues[index];
3523
3524 default:
3525 debug_assert( 0 );
3526 return ureg_src_undef();
3527 }
3528 }
3529
3530 /**
3531 * Create a TGSI ureg_dst register from an st_dst_reg.
3532 */
3533 static struct ureg_dst
3534 translate_dst( struct st_translate *t,
3535 const st_dst_reg *dst_reg,
3536 boolean saturate )
3537 {
3538 struct ureg_dst dst = dst_register( t,
3539 dst_reg->file,
3540 dst_reg->index );
3541
3542 dst = ureg_writemask( dst,
3543 dst_reg->writemask );
3544
3545 if (saturate)
3546 dst = ureg_saturate( dst );
3547
3548 if (dst_reg->reladdr != NULL)
3549 dst = ureg_dst_indirect( dst, ureg_src(t->address[0]) );
3550
3551 return dst;
3552 }
3553
3554 /**
3555 * Create a TGSI ureg_src register from an st_src_reg.
3556 */
3557 static struct ureg_src
3558 translate_src( struct st_translate *t,
3559 const st_src_reg *src_reg )
3560 {
3561 struct ureg_src src = src_register( t, src_reg->file, src_reg->index );
3562
3563 src = ureg_swizzle( src,
3564 GET_SWZ( src_reg->swizzle, 0 ) & 0x3,
3565 GET_SWZ( src_reg->swizzle, 1 ) & 0x3,
3566 GET_SWZ( src_reg->swizzle, 2 ) & 0x3,
3567 GET_SWZ( src_reg->swizzle, 3 ) & 0x3);
3568
3569 if ((src_reg->negate & 0xf) == NEGATE_XYZW)
3570 src = ureg_negate(src);
3571
3572 if (src_reg->reladdr != NULL) {
3573 /* Normally ureg_src_indirect() would be used here, but a stupid compiler
3574 * bug in g++ makes ureg_src_indirect (an inline C function) erroneously
3575 * set the bit for src.Negate. So we have to do the operation manually
3576 * here to work around the compiler's problems. */
3577 /*src = ureg_src_indirect(src, ureg_src(t->address[0]));*/
3578 struct ureg_src addr = ureg_src(t->address[0]);
3579 src.Indirect = 1;
3580 src.IndirectFile = addr.File;
3581 src.IndirectIndex = addr.Index;
3582 src.IndirectSwizzle = addr.SwizzleX;
3583
3584 if (src_reg->file != PROGRAM_INPUT &&
3585 src_reg->file != PROGRAM_OUTPUT) {
3586 /* If src_reg->index was negative, it was set to zero in
3587 * src_register(). Reassign it now. But don't do this
3588 * for input/output regs since they get remapped while
3589 * const buffers don't.
3590 */
3591 src.Index = src_reg->index;
3592 }
3593 }
3594
3595 return src;
3596 }
3597
3598 static void
3599 compile_tgsi_instruction(struct st_translate *t,
3600 const struct glsl_to_tgsi_instruction *inst)
3601 {
3602 struct ureg_program *ureg = t->ureg;
3603 GLuint i;
3604 struct ureg_dst dst[1];
3605 struct ureg_src src[4];
3606 unsigned num_dst;
3607 unsigned num_src;
3608
3609 num_dst = num_inst_dst_regs( inst->op );
3610 num_src = num_inst_src_regs( inst->op );
3611
3612 if (num_dst)
3613 dst[0] = translate_dst( t,
3614 &inst->dst,
3615 inst->saturate);
3616
3617 for (i = 0; i < num_src; i++)
3618 src[i] = translate_src( t, &inst->src[i] );
3619
3620 switch( inst->op ) {
3621 case TGSI_OPCODE_BGNLOOP:
3622 case TGSI_OPCODE_CAL:
3623 case TGSI_OPCODE_ELSE:
3624 case TGSI_OPCODE_ENDLOOP:
3625 case TGSI_OPCODE_IF:
3626 debug_assert(num_dst == 0);
3627 ureg_label_insn( ureg,
3628 inst->op,
3629 src, num_src,
3630 get_label( t,
3631 inst->op == TGSI_OPCODE_CAL ? inst->function->sig_id : 0 ));
3632 return;
3633
3634 case TGSI_OPCODE_TEX:
3635 case TGSI_OPCODE_TXB:
3636 case TGSI_OPCODE_TXD:
3637 case TGSI_OPCODE_TXL:
3638 case TGSI_OPCODE_TXP:
3639 src[num_src++] = t->samplers[inst->sampler];
3640 ureg_tex_insn( ureg,
3641 inst->op,
3642 dst, num_dst,
3643 translate_texture_target( inst->tex_target,
3644 inst->tex_shadow ),
3645 src, num_src );
3646 return;
3647
3648 case TGSI_OPCODE_SCS:
3649 dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XY );
3650 ureg_insn( ureg,
3651 inst->op,
3652 dst, num_dst,
3653 src, num_src );
3654 break;
3655
3656 default:
3657 ureg_insn( ureg,
3658 inst->op,
3659 dst, num_dst,
3660 src, num_src );
3661 break;
3662 }
3663 }
3664
3665 /**
3666 * Emit the TGSI instructions to adjust the WPOS pixel center convention
3667 * Basically, add (adjX, adjY) to the fragment position.
3668 */
3669 static void
3670 emit_adjusted_wpos( struct st_translate *t,
3671 const struct gl_program *program,
3672 GLfloat adjX, GLfloat adjY)
3673 {
3674 struct ureg_program *ureg = t->ureg;
3675 struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
3676 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
3677
3678 /* Note that we bias X and Y and pass Z and W through unchanged.
3679 * The shader might also use gl_FragCoord.w and .z.
3680 */
3681 ureg_ADD(ureg, wpos_temp, wpos_input,
3682 ureg_imm4f(ureg, adjX, adjY, 0.0f, 0.0f));
3683
3684 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
3685 }
3686
3687
3688 /**
3689 * Emit the TGSI instructions for inverting the WPOS y coordinate.
3690 * This code is unavoidable because it also depends on whether
3691 * a FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
3692 */
3693 static void
3694 emit_wpos_inversion( struct st_translate *t,
3695 const struct gl_program *program,
3696 boolean invert)
3697 {
3698 struct ureg_program *ureg = t->ureg;
3699
3700 /* Fragment program uses fragment position input.
3701 * Need to replace instances of INPUT[WPOS] with temp T
3702 * where T = INPUT[WPOS] but with y inverted.
3703 */
3704 static const gl_state_index wposTransformState[STATE_LENGTH]
3705 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM,
3706 (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
3707
3708 /* XXX: note we are modifying the incoming shader here! Need to
3709 * do this before emitting the constant decls below, or this
3710 * will be missed:
3711 */
3712 unsigned wposTransConst = _mesa_add_state_reference(program->Parameters,
3713 wposTransformState);
3714
3715 struct ureg_src wpostrans = ureg_DECL_constant( ureg, wposTransConst );
3716 struct ureg_dst wpos_temp;
3717 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
3718
3719 /* MOV wpos_temp, input[wpos]
3720 */
3721 if (wpos_input.File == TGSI_FILE_TEMPORARY)
3722 wpos_temp = ureg_dst(wpos_input);
3723 else {
3724 wpos_temp = ureg_DECL_temporary( ureg );
3725 ureg_MOV( ureg, wpos_temp, wpos_input );
3726 }
3727
3728 if (invert) {
3729 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
3730 */
3731 ureg_MAD( ureg,
3732 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
3733 wpos_input,
3734 ureg_scalar(wpostrans, 0),
3735 ureg_scalar(wpostrans, 1));
3736 } else {
3737 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
3738 */
3739 ureg_MAD( ureg,
3740 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
3741 wpos_input,
3742 ureg_scalar(wpostrans, 2),
3743 ureg_scalar(wpostrans, 3));
3744 }
3745
3746 /* Use wpos_temp as position input from here on:
3747 */
3748 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
3749 }
3750
3751
3752 /**
3753 * Emit fragment position/coordinate code.
3754 */
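/* The logic below roughly boils down to: when the driver can natively
 * match the convention the fragment shader asks for (origin upper/lower
 * left, pixel center integer/half integer), declare the corresponding
 * TGSI coord property (or rely on the TGSI default); otherwise keep what
 * the driver supports and compensate in the shader with a half-pixel
 * bias (emit_adjusted_wpos) and/or a Y flip (emit_wpos_inversion).
 */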
3755 static void
3756 emit_wpos(struct st_context *st,
3757 struct st_translate *t,
3758 const struct gl_program *program,
3759 struct ureg_program *ureg)
3760 {
3761 const struct gl_fragment_program *fp =
3762 (const struct gl_fragment_program *) program;
3763 struct pipe_screen *pscreen = st->pipe->screen;
3764 boolean invert = FALSE;
3765
3766 if (fp->OriginUpperLeft) {
3767 /* Fragment shader wants origin in upper-left */
3768 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
3769 /* the driver supports upper-left origin */
3770 }
3771 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
3772 /* the driver supports lower-left origin, need to invert Y */
3773 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
3774 invert = TRUE;
3775 }
3776 else
3777 assert(0);
3778 }
3779 else {
3780 /* Fragment shader wants origin in lower-left */
3781 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
3782 /* the driver supports lower-left origin */
3783 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
3784 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
3785 /* the driver supports upper-left origin, need to invert Y */
3786 invert = TRUE;
3787 else
3788 assert(0);
3789 }
3790
3791 if (fp->PixelCenterInteger) {
3792 /* Fragment shader wants pixel center integer */
3793 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER))
3794 /* the driver supports pixel center integer */
3795 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
3796 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER))
3797 /* the driver supports pixel center half integer, need to bias X,Y */
3798 emit_adjusted_wpos(t, program, 0.5f, invert ? 0.5f : -0.5f);
3799 else
3800 assert(0);
3801 }
3802 else {
3803 /* Fragment shader wants pixel center half integer */
3804 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
3805 /* the driver supports pixel center half integer */
3806 }
3807 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
3808 /* the driver supports pixel center integer, need to bias X,Y */
3809 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
3810 emit_adjusted_wpos(t, program, 0.5f, invert ? -0.5f : 0.5f);
3811 }
3812 else
3813 assert(0);
3814 }
3815
3816 /* we invert after adjustment so that we avoid the MOV to temporary,
3817 * and reuse the adjustment ADD instead */
3818 emit_wpos_inversion(t, program, invert);
3819 }
3820
3821 /**
3822 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
3823 * TGSI uses +1 for front, -1 for back.
3824 * This function converts the TGSI value to the GL value. Simply clamping/
3825 * saturating the value to [0,1] does the job.
3826 */
3827 static void
3828 emit_face_var(struct st_translate *t)
3829 {
3830 struct ureg_program *ureg = t->ureg;
3831 struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
3832 struct ureg_src face_input = t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]];
3833
3834 /* MOV_SAT face_temp, input[face] */
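   /* e.g. a front-facing TGSI value of +1 saturates to 1.0 and a
    * back-facing value of -1 saturates to 0.0, matching the GL convention.
    */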
3835 face_temp = ureg_saturate(face_temp);
3836 ureg_MOV(ureg, face_temp, face_input);
3837
3838 /* Use face_temp as face input from here on: */
3839 t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]] = ureg_src(face_temp);
3840 }
3841
3842 static void
3843 emit_edgeflags(struct st_translate *t)
3844 {
3845 struct ureg_program *ureg = t->ureg;
3846 struct ureg_dst edge_dst = t->outputs[t->outputMapping[VERT_RESULT_EDGE]];
3847 struct ureg_src edge_src = t->inputs[t->inputMapping[VERT_ATTRIB_EDGEFLAG]];
3848
3849 ureg_MOV(ureg, edge_dst, edge_src);
3850 }
3851
3852 /**
3853 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
3854 * \param program the program to translate
3855 * \param numInputs number of input registers used
3856 * \param inputMapping maps Mesa fragment program inputs to TGSI generic
3857 * input indexes
3858 * \param inputSemanticName the TGSI_SEMANTIC flag for each input
3859 * \param inputSemanticIndex the semantic index (ex: which texcoord) for
3860 * each input
3861 * \param interpMode the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
3862 * \param numOutputs number of output registers used
3863 * \param outputMapping maps Mesa fragment program outputs to TGSI
3864 * generic outputs
3865 * \param outputSemanticName the TGSI_SEMANTIC flag for each output
3866 * \param outputSemanticIndex the semantic index (ex: which texcoord) for
3867 * each output
3868 *
3869 * \return PIPE_OK, PIPE_ERROR_OUT_OF_MEMORY or PIPE_ERROR_BAD_INPUT
3870 */
3871 extern "C" enum pipe_error
3872 st_translate_program(
3873 struct gl_context *ctx,
3874 uint procType,
3875 struct ureg_program *ureg,
3876 glsl_to_tgsi_visitor *program,
3877 const struct gl_program *proginfo,
3878 GLuint numInputs,
3879 const GLuint inputMapping[],
3880 const ubyte inputSemanticName[],
3881 const ubyte inputSemanticIndex[],
3882 const GLuint interpMode[],
3883 GLuint numOutputs,
3884 const GLuint outputMapping[],
3885 const ubyte outputSemanticName[],
3886 const ubyte outputSemanticIndex[],
3887 boolean passthrough_edgeflags )
3888 {
3889 struct st_translate translate, *t;
3890 unsigned i;
3891 enum pipe_error ret = PIPE_OK;
3892
3893 assert(numInputs <= Elements(t->inputs));
3894 assert(numOutputs <= Elements(t->outputs));
3895
3896 t = &translate;
3897 memset(t, 0, sizeof *t);
3898
3899 t->procType = procType;
3900 t->inputMapping = inputMapping;
3901 t->outputMapping = outputMapping;
3902 t->ureg = ureg;
3903 t->pointSizeOutIndex = -1;
3904 t->prevInstWrotePointSize = GL_FALSE;
3905
3906 /*
3907 * Declare input attributes.
3908 */
3909 if (procType == TGSI_PROCESSOR_FRAGMENT) {
3910 for (i = 0; i < numInputs; i++) {
3911 t->inputs[i] = ureg_DECL_fs_input(ureg,
3912 inputSemanticName[i],
3913 inputSemanticIndex[i],
3914 interpMode[i]);
3915 }
3916
3917 if (proginfo->InputsRead & FRAG_BIT_WPOS) {
3918 /* Must do this after setting up t->inputs, and before
3919 * emitting constant references, below:
3920 */
3921 emit_wpos(st_context(ctx), t, proginfo, ureg);
3922 }
3923
3924 if (proginfo->InputsRead & FRAG_BIT_FACE)
3925 emit_face_var(t);
3926
3927 /*
3928 * Declare output attributes.
3929 */
3930 for (i = 0; i < numOutputs; i++) {
3931 switch (outputSemanticName[i]) {
3932 case TGSI_SEMANTIC_POSITION:
3933 t->outputs[i] = ureg_DECL_output( ureg,
3934 TGSI_SEMANTIC_POSITION, /* Z / Depth */
3935 outputSemanticIndex[i] );
3936
3937 t->outputs[i] = ureg_writemask( t->outputs[i],
3938 TGSI_WRITEMASK_Z );
3939 break;
3940 case TGSI_SEMANTIC_STENCIL:
3941 t->outputs[i] = ureg_DECL_output( ureg,
3942 TGSI_SEMANTIC_STENCIL, /* Stencil */
3943 outputSemanticIndex[i] );
3944 t->outputs[i] = ureg_writemask( t->outputs[i],
3945 TGSI_WRITEMASK_Y );
3946 break;
3947 case TGSI_SEMANTIC_COLOR:
3948 t->outputs[i] = ureg_DECL_output( ureg,
3949 TGSI_SEMANTIC_COLOR,
3950 outputSemanticIndex[i] );
3951 break;
3952 default:
3953 debug_assert(0);
3954 return PIPE_ERROR_BAD_INPUT;
3955 }
3956 }
3957 }
3958 else if (procType == TGSI_PROCESSOR_GEOMETRY) {
3959 for (i = 0; i < numInputs; i++) {
3960 t->inputs[i] = ureg_DECL_gs_input(ureg,
3961 i,
3962 inputSemanticName[i],
3963 inputSemanticIndex[i]);
3964 }
3965
3966 for (i = 0; i < numOutputs; i++) {
3967 t->outputs[i] = ureg_DECL_output( ureg,
3968 outputSemanticName[i],
3969 outputSemanticIndex[i] );
3970 }
3971 }
3972 else {
3973 assert(procType == TGSI_PROCESSOR_VERTEX);
3974
3975 for (i = 0; i < numInputs; i++) {
3976 t->inputs[i] = ureg_DECL_vs_input(ureg, i);
3977 }
3978
3979 for (i = 0; i < numOutputs; i++) {
3980 t->outputs[i] = ureg_DECL_output( ureg,
3981 outputSemanticName[i],
3982 outputSemanticIndex[i] );
3983 if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
3984 /* Writing to the point size result register requires special
3985 * handling to implement clamping.
3986 */
3987 static const gl_state_index pointSizeClampState[STATE_LENGTH]
3988 = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
3989 /* XXX: note we are modifying the incoming shader here! Need to
3990 * do this before emitting the constant decls below, or this
3991 * will be missed.
3992 */
3993 unsigned pointSizeClampConst =
3994 _mesa_add_state_reference(proginfo->Parameters,
3995 pointSizeClampState);
3996 struct ureg_dst psizregtemp = ureg_DECL_temporary( ureg );
3997 t->pointSizeConst = ureg_DECL_constant( ureg, pointSizeClampConst );
3998 t->pointSizeResult = t->outputs[i];
3999 t->pointSizeOutIndex = i;
4000 t->outputs[i] = psizregtemp;
4001 }
4002 }
4003 if (passthrough_edgeflags)
4004 emit_edgeflags(t);
4005 }
4006
4007 /* Declare address register.
4008 */
4009 if (program->num_address_regs > 0) {
4010 debug_assert( program->num_address_regs == 1 );
4011 t->address[0] = ureg_DECL_address( ureg );
4012 }
4013
4014 /* Declare misc input registers
4015 */
4016 {
4017 GLbitfield sysInputs = proginfo->SystemValuesRead;
4018 unsigned numSys = 0;
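      /* Each system value read by the shader gets its own TGSI system value
       * declaration; mesa_sysval_to_semantic[] supplies the TGSI semantic
       * (e.g. the instance ID system value is expected to map to
       * TGSI_SEMANTIC_INSTANCEID).
       */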
4019 for (i = 0; sysInputs; i++) {
4020 if (sysInputs & (1 << i)) {
4021 unsigned semName = mesa_sysval_to_semantic[i];
4022 t->systemValues[i] = ureg_DECL_system_value(ureg, numSys, semName, 0);
4023 numSys++;
4024 sysInputs &= ~(1 << i);
4025 }
4026 }
4027 }
4028
4029 if (program->indirect_addr_temps) {
4030 /* If temps are accessed with indirect addressing, declare temporaries
4031 * in sequential order. Else, we declare them on demand elsewhere.
4032 * (Note: the number of temporaries is equal to program->next_temp)
4033 */
4034 for (i = 0; i < (unsigned)program->next_temp; i++) {
4035 /* XXX use TGSI_FILE_TEMPORARY_ARRAY when it's supported by ureg */
4036 t->temps[i] = ureg_DECL_temporary( t->ureg );
4037 }
4038 }
4039
4040 /* Emit constants and immediates. Mesa uses a single index space
4041 * for these, so we put all the translated regs in t->constants.
4042 * XXX: this entire if block depends on proginfo->Parameters from Mesa IR
4043 */
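   /* State variables, env/local parameters and uniforms can change between
    * draws without recompiling the shader, so they must stay in the constant
    * buffer; literal PROGRAM_CONSTANT values can instead be emitted as TGSI
    * immediates (unless the constant buffer is indirectly addressed).
    */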
4044 if (proginfo->Parameters) {
4045 t->constants = (struct ureg_src *)CALLOC( proginfo->Parameters->NumParameters * sizeof t->constants[0] );
4046 if (t->constants == NULL) {
4047 ret = PIPE_ERROR_OUT_OF_MEMORY;
4048 goto out;
4049 }
4050
4051 for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
4052 switch (proginfo->Parameters->Parameters[i].Type) {
4053 case PROGRAM_ENV_PARAM:
4054 case PROGRAM_LOCAL_PARAM:
4055 case PROGRAM_STATE_VAR:
4056 case PROGRAM_NAMED_PARAM:
4057 case PROGRAM_UNIFORM:
4058 t->constants[i] = ureg_DECL_constant( ureg, i );
4059 break;
4060
4061 /* Emit immediates only when there's no indirect addressing of
4062 * the const buffer.
4063 * FIXME: Be smarter and recognize param arrays:
4064 * indirect addressing is only valid within the referenced
4065 * array.
4066 */
4067 case PROGRAM_CONSTANT:
4068 if (program->indirect_addr_consts)
4069 t->constants[i] = ureg_DECL_constant( ureg, i );
4070 else
4071 switch(proginfo->Parameters->Parameters[i].DataType)
4072 {
4073 case GL_FLOAT:
4074 case GL_FLOAT_VEC2:
4075 case GL_FLOAT_VEC3:
4076 case GL_FLOAT_VEC4:
4077 t->constants[i] = ureg_DECL_immediate(ureg, (float *)proginfo->Parameters->ParameterValues[i], 4);
4078 break;
4079 case GL_INT:
4080 case GL_INT_VEC2:
4081 case GL_INT_VEC3:
4082 case GL_INT_VEC4:
4083 t->constants[i] = ureg_DECL_immediate_int(ureg, (int *)proginfo->Parameters->ParameterValues[i], 4);
4084 break;
4085 case GL_UNSIGNED_INT:
4086 case GL_UNSIGNED_INT_VEC2:
4087 case GL_UNSIGNED_INT_VEC3:
4088 case GL_UNSIGNED_INT_VEC4:
4089 case GL_BOOL:
4090 case GL_BOOL_VEC2:
4091 case GL_BOOL_VEC3:
4092 case GL_BOOL_VEC4:
4093 t->constants[i] = ureg_DECL_immediate_uint(ureg, (unsigned *)proginfo->Parameters->ParameterValues[i], 4);
4094 break;
4095 default:
4096 assert(!"should not get here");
4097 }
4098 break;
4099 default:
4100 break;
4101 }
4102 }
4103 }
4104
4105 /* texture samplers */
4106 for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
4107 if (program->samplers_used & (1 << i)) {
4108 t->samplers[i] = ureg_DECL_sampler( ureg, i );
4109 }
4110 }
4111
4112 /* Emit each instruction in turn:
4113 */
4114 foreach_iter(exec_list_iterator, iter, program->instructions) {
4115 set_insn_start( t, ureg_get_instruction_number( ureg ));
4116 compile_tgsi_instruction( t, (glsl_to_tgsi_instruction *)iter.get() );
4117
4118 if (t->prevInstWrotePointSize && proginfo->Id) {
4119 /* The previous instruction wrote to the (fake) vertex point size
4120 * result register. Now we need to clamp that value to the min/max
4121 * point size range, putting the result into the real point size
4122 * register.
4123 * Note that we can't easily do this at the end of the program due to a
4124 * possible early return.
4125 */
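         /* Expanded, the two instructions below compute
          *   psize.x = MIN(MAX(psize_temp.x, pointSizeConst.y),
          *                 pointSizeConst.z)
          * with the clamp bounds taken from the point size clamp state
          * constant declared earlier.
          */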
4126 set_insn_start( t, ureg_get_instruction_number( ureg ));
4127 ureg_MAX( t->ureg,
4128 ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
4129 ureg_src(t->outputs[t->pointSizeOutIndex]),
4130 ureg_swizzle(t->pointSizeConst, 1,1,1,1));
4131 ureg_MIN( t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
4132 ureg_src(t->outputs[t->pointSizeOutIndex]),
4133 ureg_swizzle(t->pointSizeConst, 2,2,2,2));
4134 }
4135 t->prevInstWrotePointSize = GL_FALSE;
4136 }
4137
4138 /* Fix up all emitted labels:
4139 */
4140 for (i = 0; i < t->labels_count; i++) {
4141 ureg_fixup_label( ureg,
4142 t->labels[i].token,
4143 t->insn[t->labels[i].branch_target] );
4144 }
4145
4146 out:
4147 FREE(t->insn);
4148 FREE(t->labels);
4149 FREE(t->constants);
4150
4151 if (t->error) {
4152 debug_printf("%s: translate error flag set\n", __FUNCTION__);
4153 }
4154
4155 return ret;
4156 }
4157 /* ----------------------------- End TGSI code ------------------------------ */
4158
4159 /**
4160 * Convert a shader's GLSL IR into a Mesa gl_program, but without
4161 * generating Mesa IR instructions.
4162 */
4163 static struct gl_program *
4164 get_mesa_program(struct gl_context *ctx,
4165 struct gl_shader_program *shader_program,
4166 struct gl_shader *shader)
4167 {
4168 glsl_to_tgsi_visitor* v = new glsl_to_tgsi_visitor();
4169 struct gl_program *prog;
4170 GLenum target;
4171 const char *target_string;
4172 GLboolean progress;
4173 struct gl_shader_compiler_options *options =
4174 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)];
4175
4176 switch (shader->Type) {
4177 case GL_VERTEX_SHADER:
4178 target = GL_VERTEX_PROGRAM_ARB;
4179 target_string = "vertex";
4180 break;
4181 case GL_FRAGMENT_SHADER:
4182 target = GL_FRAGMENT_PROGRAM_ARB;
4183 target_string = "fragment";
4184 break;
4185 case GL_GEOMETRY_SHADER:
4186 target = GL_GEOMETRY_PROGRAM_NV;
4187 target_string = "geometry";
4188 break;
4189 default:
4190 assert(!"should not be reached");
4191 return NULL;
4192 }
4193
4194 validate_ir_tree(shader->ir);
4195
4196 prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name);
4197 if (!prog)
4198 return NULL;
4199 prog->Parameters = _mesa_new_parameter_list();
4200 prog->Varying = _mesa_new_parameter_list();
4201 prog->Attributes = _mesa_new_parameter_list();
4202 v->ctx = ctx;
4203 v->prog = prog;
4204 v->shader_program = shader_program;
4205 v->options = options;
4206 v->glsl_version = ctx->Const.GLSLVersion;
4207
4208 add_uniforms_to_parameters_list(shader_program, shader, prog);
4209
4210 /* Emit intermediate IR for main(). */
4211 visit_exec_list(shader->ir, v);
4212
4213 /* Now emit bodies for any functions that were used. */
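   /* Emitting a body can introduce calls to additional functions, so keep
    * iterating until a pass over the signature list emits nothing new.
    */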
4214 do {
4215 progress = GL_FALSE;
4216
4217 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
4218 function_entry *entry = (function_entry *)iter.get();
4219
4220 if (!entry->bgn_inst) {
4221 v->current_function = entry;
4222
4223 entry->bgn_inst = v->emit(NULL, TGSI_OPCODE_BGNSUB);
4224 entry->bgn_inst->function = entry;
4225
4226 visit_exec_list(&entry->sig->body, v);
4227
4228 glsl_to_tgsi_instruction *last;
4229 last = (glsl_to_tgsi_instruction *)v->instructions.get_tail();
4230 if (last->op != TGSI_OPCODE_RET)
4231 v->emit(NULL, TGSI_OPCODE_RET);
4232
4233 glsl_to_tgsi_instruction *end;
4234 end = v->emit(NULL, TGSI_OPCODE_ENDSUB);
4235 end->function = entry;
4236
4237 progress = GL_TRUE;
4238 }
4239 }
4240 } while (progress);
4241
4242 #if 0
4243 /* Print out some information (for debugging purposes) used by the
4244 * optimization passes. */
4245 for (int i = 0; i < v->next_temp; i++) {
4246 int fr = v->get_first_temp_read(i);
4247 int fw = v->get_first_temp_write(i);
4248 int lr = v->get_last_temp_read(i);
4249 int lw = v->get_last_temp_write(i);
4250
4251 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, fr, fw, lr, lw);
4252 assert(fw <= fr);
4253 }
4254 #endif
4255
4256 /* Remove reads of output registers, and of varyings in vertex shaders. */
4257 v->remove_output_reads(PROGRAM_OUTPUT);
4258 if (target == GL_VERTEX_PROGRAM_ARB)
4259 v->remove_output_reads(PROGRAM_VARYING);
4260
4261 /* Perform the simplify_cmp optimization, which is required by r300g. */
4262 v->simplify_cmp();
4263
4264 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor.
4265 * FIXME: These passes to optimize temporary registers don't work when there
4266 * is indirect addressing of the temporary register space. We need proper
4267 * array support so that we don't have to give up these passes in every
4268 * shader that uses arrays.
4269 */
4270 if (!v->indirect_addr_temps) {
4271 v->copy_propagate();
4272 v->eliminate_dead_code();
4273 v->merge_registers();
4274 v->renumber_registers();
4275 }
4276
4277 /* Write the END instruction. */
4278 v->emit(NULL, TGSI_OPCODE_END);
4279
4280 if (ctx->Shader.Flags & GLSL_DUMP) {
4281 printf("\n");
4282 printf("GLSL IR for linked %s program %d:\n", target_string,
4283 shader_program->Name);
4284 _mesa_print_ir(shader->ir, NULL);
4285 printf("\n");
4286 printf("\n");
4287 }
4288
4289 prog->Instructions = NULL;
4290 prog->NumInstructions = 0;
4291
4292 do_set_program_inouts(shader->ir, prog);
4293 count_resources(v, prog);
4294
4295 check_resources(ctx, shader_program, v, prog);
4296
4297 _mesa_reference_program(ctx, &shader->Program, prog);
4298
4299 struct st_vertex_program *stvp;
4300 struct st_fragment_program *stfp;
4301 struct st_geometry_program *stgp;
4302
4303 switch (shader->Type) {
4304 case GL_VERTEX_SHADER:
4305 stvp = (struct st_vertex_program *)prog;
4306 stvp->glsl_to_tgsi = v;
4307 break;
4308 case GL_FRAGMENT_SHADER:
4309 stfp = (struct st_fragment_program *)prog;
4310 stfp->glsl_to_tgsi = v;
4311 break;
4312 case GL_GEOMETRY_SHADER:
4313 stgp = (struct st_geometry_program *)prog;
4314 stgp->glsl_to_tgsi = v;
4315 break;
4316 default:
4317 assert(!"should not be reached");
4318 return NULL;
4319 }
4320
4321 return prog;
4322 }
4323
4324 extern "C" {
4325
4326 struct gl_shader *
4327 st_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
4328 {
4329 struct gl_shader *shader;
4330 assert(type == GL_FRAGMENT_SHADER || type == GL_VERTEX_SHADER ||
4331 type == GL_GEOMETRY_SHADER_ARB);
4332 shader = rzalloc(NULL, struct gl_shader);
4333 if (shader) {
4334 shader->Type = type;
4335 shader->Name = name;
4336 _mesa_init_shader(ctx, shader);
4337 }
4338 return shader;
4339 }
4340
4341 struct gl_shader_program *
4342 st_new_shader_program(struct gl_context *ctx, GLuint name)
4343 {
4344 struct gl_shader_program *shProg;
4345 shProg = rzalloc(NULL, struct gl_shader_program);
4346 if (shProg) {
4347 shProg->Name = name;
4348 _mesa_init_shader_program(ctx, shProg);
4349 }
4350 return shProg;
4351 }
4352
4353 /**
4354 * Link a shader.
4355 * Called via ctx->Driver.LinkShader().
4356 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
4357 * with code lowering and other optimizations.
4358 */
4359 GLboolean
4360 st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4361 {
4362 assert(prog->LinkStatus);
4363
4364 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4365 if (prog->_LinkedShaders[i] == NULL)
4366 continue;
4367
4368 bool progress;
4369 exec_list *ir = prog->_LinkedShaders[i]->ir;
4370 const struct gl_shader_compiler_options *options =
4371 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)];
4372
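      /* Run the lowering passes to a fixed point: one pass can expose new
       * opportunities for another, so loop until no pass reports progress.
       */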
4373 do {
4374 progress = false;
4375
4376 /* Lowering */
4377 do_mat_op_to_vec(ir);
4378 lower_instructions(ir, (MOD_TO_FRACT | DIV_TO_MUL_RCP | EXP_TO_EXP2
4379 | LOG_TO_LOG2
4380 | ((options->EmitNoPow) ? POW_TO_EXP2 : 0)));
4381
4382 progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress;
4383
4384 progress = do_common_optimization(ir, true, options->MaxUnrollIterations) || progress;
4385
4386 progress = lower_quadop_vector(ir, true) || progress;
4387
4388 if (options->EmitNoIfs) {
4389 progress = lower_discard(ir) || progress;
4390 progress = lower_if_to_cond_assign(ir) || progress;
4391 }
4392
4393 if (options->EmitNoNoise)
4394 progress = lower_noise(ir) || progress;
4395
4396 /* If there are forms of indirect addressing that the driver
4397 * cannot handle, perform the lowering pass.
4398 */
4399 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput
4400 || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform)
4401 progress =
4402 lower_variable_index_to_cond_assign(ir,
4403 options->EmitNoIndirectInput,
4404 options->EmitNoIndirectOutput,
4405 options->EmitNoIndirectTemp,
4406 options->EmitNoIndirectUniform)
4407 || progress;
4408
4409 progress = do_vec_index_to_cond_assign(ir) || progress;
4410 } while (progress);
4411
4412 validate_ir_tree(ir);
4413 }
4414
4415 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4416 struct gl_program *linked_prog;
4417
4418 if (prog->_LinkedShaders[i] == NULL)
4419 continue;
4420
4421 linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i]);
4422
4423 if (linked_prog) {
4424 bool ok = true;
4425
4426 switch (prog->_LinkedShaders[i]->Type) {
4427 case GL_VERTEX_SHADER:
4428 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
4429 (struct gl_vertex_program *)linked_prog);
4430 ok = ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
4431 linked_prog);
4432 break;
4433 case GL_FRAGMENT_SHADER:
4434 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
4435 (struct gl_fragment_program *)linked_prog);
4436 ok = ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
4437 linked_prog);
4438 break;
4439 case GL_GEOMETRY_SHADER:
4440 _mesa_reference_geomprog(ctx, &prog->GeometryProgram,
4441 (struct gl_geometry_program *)linked_prog);
4442 ok = ctx->Driver.ProgramStringNotify(ctx, GL_GEOMETRY_PROGRAM_NV,
4443 linked_prog);
4444 break;
4445 }
4446 if (!ok) {
4447 return GL_FALSE;
4448 }
4449 }
4450
4451 _mesa_reference_program(ctx, &linked_prog, NULL);
4452 }
4453
4454 return GL_TRUE;
4455 }
4456
4457
4458 /**
4459 * Link a GLSL shader program. Called via glLinkProgram().
4460 */
4461 void
4462 st_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4463 {
4464 unsigned int i;
4465
4466 _mesa_clear_shader_program_data(ctx, prog);
4467
4468 prog->LinkStatus = GL_TRUE;
4469
4470 for (i = 0; i < prog->NumShaders; i++) {
4471 if (!prog->Shaders[i]->CompileStatus) {
4472 fail_link(prog, "linking with uncompiled shader");
4473 prog->LinkStatus = GL_FALSE;
4474 }
4475 }
4476
4477 prog->Varying = _mesa_new_parameter_list();
4478 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
4479 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
4480 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, NULL);
4481
4482 if (prog->LinkStatus) {
4483 link_shaders(ctx, prog);
4484 }
4485
4486 if (prog->LinkStatus) {
4487 if (!ctx->Driver.LinkShader(ctx, prog)) {
4488 prog->LinkStatus = GL_FALSE;
4489 }
4490 }
4491
4492 set_uniform_initializers(ctx, prog);
4493
4494 if (ctx->Shader.Flags & GLSL_DUMP) {
4495 if (!prog->LinkStatus) {
4496 printf("GLSL shader program %d failed to link\n", prog->Name);
4497 }
4498
4499 if (prog->InfoLog && prog->InfoLog[0] != 0) {
4500 printf("GLSL shader program %d info log:\n", prog->Name);
4501 printf("%s\n", prog->InfoLog);
4502 }
4503 }
4504 }
4505
4506 } /* extern "C" */