glsl_to_tgsi: replace MAX_PROGRAM_TEMPS (256) with MAX_TEMPS (4096)
[mesa.git] / src / mesa / state_tracker / st_glsl_to_tgsi.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27 /**
28 * \file glsl_to_tgsi.cpp
29 *
30 * Translate GLSL IR to TGSI.
31 */
32
33 #include <stdio.h>
34 #include "main/compiler.h"
35 #include "ir.h"
36 #include "ir_visitor.h"
37 #include "ir_print_visitor.h"
38 #include "ir_expression_flattening.h"
39 #include "glsl_types.h"
40 #include "glsl_parser_extras.h"
41 #include "../glsl/program.h"
42 #include "ir_optimization.h"
43 #include "ast.h"
44
45 extern "C" {
46 #include "main/mtypes.h"
47 #include "main/shaderapi.h"
48 #include "main/shaderobj.h"
49 #include "main/uniforms.h"
50 #include "program/hash_table.h"
51 #include "program/prog_instruction.h"
52 #include "program/prog_optimize.h"
53 #include "program/prog_print.h"
54 #include "program/program.h"
55 #include "program/prog_uniform.h"
56 #include "program/prog_parameter.h"
57 #include "program/sampler.h"
58
59 #include "pipe/p_compiler.h"
60 #include "pipe/p_context.h"
61 #include "pipe/p_screen.h"
62 #include "pipe/p_shader_tokens.h"
63 #include "pipe/p_state.h"
64 #include "util/u_math.h"
65 #include "tgsi/tgsi_ureg.h"
66 #include "tgsi/tgsi_info.h"
67 #include "st_context.h"
68 #include "st_program.h"
69 #include "st_glsl_to_tgsi.h"
70 #include "st_mesa_to_tgsi.h"
71 }
72
73 #define PROGRAM_ANY_CONST ((1 << PROGRAM_LOCAL_PARAM) | \
74 (1 << PROGRAM_ENV_PARAM) | \
75 (1 << PROGRAM_STATE_VAR) | \
76 (1 << PROGRAM_NAMED_PARAM) | \
77 (1 << PROGRAM_CONSTANT) | \
78 (1 << PROGRAM_UNIFORM))
79
80 #define MAX_TEMPS 4096
81
82 class st_src_reg;
83 class st_dst_reg;
84
85 static int swizzle_for_size(int size);
86
87 /**
88 * This struct is a corresponding struct to TGSI ureg_src.
89 */
90 class st_src_reg {
91 public:
92 st_src_reg(gl_register_file file, int index, const glsl_type *type)
93 {
94 this->file = file;
95 this->index = index;
96 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
97 this->swizzle = swizzle_for_size(type->vector_elements);
98 else
99 this->swizzle = SWIZZLE_XYZW;
100 this->negate = 0;
101 this->type = type ? type->base_type : GLSL_TYPE_ERROR;
102 this->reladdr = NULL;
103 }
104
105 st_src_reg(gl_register_file file, int index, int type)
106 {
107 this->type = type;
108 this->file = file;
109 this->index = index;
110 this->swizzle = SWIZZLE_XYZW;
111 this->negate = 0;
112 this->reladdr = NULL;
113 }
114
115 st_src_reg()
116 {
117 this->type = GLSL_TYPE_ERROR;
118 this->file = PROGRAM_UNDEFINED;
119 this->index = 0;
120 this->swizzle = 0;
121 this->negate = 0;
122 this->reladdr = NULL;
123 }
124
125 explicit st_src_reg(st_dst_reg reg);
126
127 gl_register_file file; /**< PROGRAM_* from Mesa */
128 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
129 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
130 int negate; /**< NEGATE_XYZW mask from mesa */
131 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
132 /** Register index should be offset by the integer in this reg. */
133 st_src_reg *reladdr;
134 };
135
136 class st_dst_reg {
137 public:
138 st_dst_reg(gl_register_file file, int writemask, int type)
139 {
140 this->file = file;
141 this->index = 0;
142 this->writemask = writemask;
143 this->cond_mask = COND_TR;
144 this->reladdr = NULL;
145 this->type = type;
146 }
147
148 st_dst_reg()
149 {
150 this->type = GLSL_TYPE_ERROR;
151 this->file = PROGRAM_UNDEFINED;
152 this->index = 0;
153 this->writemask = 0;
154 this->cond_mask = COND_TR;
155 this->reladdr = NULL;
156 }
157
158 explicit st_dst_reg(st_src_reg reg);
159
160 gl_register_file file; /**< PROGRAM_* from Mesa */
161 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
162 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
163 GLuint cond_mask:4;
164 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
165 /** Register index should be offset by the integer in this reg. */
166 st_src_reg *reladdr;
167 };
168
169 st_src_reg::st_src_reg(st_dst_reg reg)
170 {
171 this->type = reg.type;
172 this->file = reg.file;
173 this->index = reg.index;
174 this->swizzle = SWIZZLE_XYZW;
175 this->negate = 0;
176 this->reladdr = NULL;
177 }
178
179 st_dst_reg::st_dst_reg(st_src_reg reg)
180 {
181 this->type = reg.type;
182 this->file = reg.file;
183 this->index = reg.index;
184 this->writemask = WRITEMASK_XYZW;
185 this->cond_mask = COND_TR;
186 this->reladdr = reg.reladdr;
187 }
188
189 class glsl_to_tgsi_instruction : public exec_node {
190 public:
191 /* Callers of this ralloc-based new need not call delete. It's
192 * easier to just ralloc_free 'ctx' (or any of its ancestors). */
193 static void* operator new(size_t size, void *ctx)
194 {
195 void *node;
196
197 node = rzalloc_size(ctx, size);
198 assert(node != NULL);
199
200 return node;
201 }
202
203 unsigned op;
204 st_dst_reg dst;
205 st_src_reg src[3];
206 /** Pointer to the ir source this tree came from for debugging */
207 ir_instruction *ir;
208 GLboolean cond_update;
209 bool saturate;
210 int sampler; /**< sampler index */
211 int tex_target; /**< One of TEXTURE_*_INDEX */
212 GLboolean tex_shadow;
213 int dead_mask; /**< Used in dead code elimination */
214
215 class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */
216 };
217
218 class variable_storage : public exec_node {
219 public:
220 variable_storage(ir_variable *var, gl_register_file file, int index)
221 : file(file), index(index), var(var)
222 {
223 /* empty */
224 }
225
226 gl_register_file file;
227 int index;
228 ir_variable *var; /* variable that maps to this, if any */
229 };
230
231 class function_entry : public exec_node {
232 public:
233 ir_function_signature *sig;
234
235 /**
236 * identifier of this function signature used by the program.
237 *
238 * At the point that Mesa instructions for function calls are
239 * generated, we don't know the address of the first instruction of
240 * the function body. So we make the BranchTarget that is called a
241 * small integer and rewrite it during set_branchtargets().
242 */
243 int sig_id;
244
245 /**
246 * Pointer to first instruction of the function body.
247 *
248 * Set during function body emits after main() is processed.
249 */
250 glsl_to_tgsi_instruction *bgn_inst;
251
252 /**
253 * Index of the first instruction of the function body in actual
254 * Mesa IR.
255 *
256 * Set after conversion from glsl_to_tgsi_instruction to prog_instruction.
257 */
258 int inst;
259
260 /** Storage for the return value. */
261 st_src_reg return_reg;
262 };
263
264 class glsl_to_tgsi_visitor : public ir_visitor {
265 public:
266 glsl_to_tgsi_visitor();
267 ~glsl_to_tgsi_visitor();
268
269 function_entry *current_function;
270
271 struct gl_context *ctx;
272 struct gl_program *prog;
273 struct gl_shader_program *shader_program;
274 struct gl_shader_compiler_options *options;
275
276 int next_temp;
277
278 int num_address_regs;
279 int samplers_used;
280 bool indirect_addr_temps;
281 bool indirect_addr_consts;
282
283 int glsl_version;
284
285 variable_storage *find_variable_storage(ir_variable *var);
286
287 function_entry *get_function_signature(ir_function_signature *sig);
288
289 st_src_reg get_temp(const glsl_type *type);
290 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);
291
292 st_src_reg st_src_reg_for_float(float val);
293 st_src_reg st_src_reg_for_int(int val);
294 st_src_reg st_src_reg_for_type(int type, int val);
295
296 /**
297 * \name Visit methods
298 *
299 * As typical for the visitor pattern, there must be one \c visit method for
300 * each concrete subclass of \c ir_instruction. Virtual base classes within
301 * the hierarchy should not have \c visit methods.
302 */
303 /*@{*/
304 virtual void visit(ir_variable *);
305 virtual void visit(ir_loop *);
306 virtual void visit(ir_loop_jump *);
307 virtual void visit(ir_function_signature *);
308 virtual void visit(ir_function *);
309 virtual void visit(ir_expression *);
310 virtual void visit(ir_swizzle *);
311 virtual void visit(ir_dereference_variable *);
312 virtual void visit(ir_dereference_array *);
313 virtual void visit(ir_dereference_record *);
314 virtual void visit(ir_assignment *);
315 virtual void visit(ir_constant *);
316 virtual void visit(ir_call *);
317 virtual void visit(ir_return *);
318 virtual void visit(ir_discard *);
319 virtual void visit(ir_texture *);
320 virtual void visit(ir_if *);
321 /*@}*/
322
323 st_src_reg result;
324
325 /** List of variable_storage */
326 exec_list variables;
327
328 /** List of function_entry */
329 exec_list function_signatures;
330 int next_signature_id;
331
332 /** List of glsl_to_tgsi_instruction */
333 exec_list instructions;
334
335 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op);
336
337 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
338 st_dst_reg dst, st_src_reg src0);
339
340 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
341 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
342
343 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
344 st_dst_reg dst,
345 st_src_reg src0, st_src_reg src1, st_src_reg src2);
346
347 unsigned get_opcode(ir_instruction *ir, unsigned op,
348 st_dst_reg dst,
349 st_src_reg src0, st_src_reg src1);
350
351 /**
352 * Emit the correct dot-product instruction for the type of arguments
353 */
354 void emit_dp(ir_instruction *ir,
355 st_dst_reg dst,
356 st_src_reg src0,
357 st_src_reg src1,
358 unsigned elements);
359
360 void emit_scalar(ir_instruction *ir, unsigned op,
361 st_dst_reg dst, st_src_reg src0);
362
363 void emit_scalar(ir_instruction *ir, unsigned op,
364 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
365
366 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);
367
368 void emit_scs(ir_instruction *ir, unsigned op,
369 st_dst_reg dst, const st_src_reg &src);
370
371 GLboolean try_emit_mad(ir_expression *ir,
372 int mul_operand);
373 GLboolean try_emit_sat(ir_expression *ir);
374
375 void emit_swz(ir_expression *ir);
376
377 bool process_move_condition(ir_rvalue *ir);
378
379 void remove_output_reads(gl_register_file type);
380 void simplify_cmp(void);
381
382 void rename_temp_register(int index, int new_index);
383 int get_first_temp_read(int index);
384 int get_first_temp_write(int index);
385 int get_last_temp_read(int index);
386 int get_last_temp_write(int index);
387
388 void copy_propagate(void);
389 void eliminate_dead_code(void);
390 int eliminate_dead_code_advanced(void);
391 void merge_registers(void);
392 void renumber_registers(void);
393
394 void *mem_ctx;
395 };
396
397 static st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
398
399 static st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
400
401 static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT);
402
403 static void
404 fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);
405
406 static void
407 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
408 {
409 va_list args;
410 va_start(args, fmt);
411 ralloc_vasprintf_append(&prog->InfoLog, fmt, args);
412 va_end(args);
413
414 prog->LinkStatus = GL_FALSE;
415 }
416
417 static int
418 swizzle_for_size(int size)
419 {
420 int size_swizzles[4] = {
421 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
422 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
423 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
424 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
425 };
426
427 assert((size >= 1) && (size <= 4));
428 return size_swizzles[size - 1];
429 }
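/* Illustrative sketch: the returned swizzle replicates the last used channel
 * into the unused slots, e.g.
 *
 *    swizzle_for_size(1) == MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X)
 *    swizzle_for_size(3) == MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z)
 */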
430
431 static bool
432 is_tex_instruction(unsigned opcode)
433 {
434 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
435 return info->is_tex;
436 }
437
438 static unsigned
439 num_inst_dst_regs(unsigned opcode)
440 {
441 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
442 return info->num_dst;
443 }
444
445 static unsigned
446 num_inst_src_regs(unsigned opcode)
447 {
448 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
449 return info->is_tex ? info->num_src - 1 : info->num_src;
450 }
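/* Illustrative note: for a texture opcode such as TGSI_OPCODE_TEX, the sampler
 * operand is not counted here, so num_inst_src_regs() reports one source fewer
 * than tgsi_get_opcode_info()->num_src.
 */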
451
452 glsl_to_tgsi_instruction *
453 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
454 st_dst_reg dst,
455 st_src_reg src0, st_src_reg src1, st_src_reg src2)
456 {
457 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
458 int num_reladdr = 0, i;
459
460 op = get_opcode(ir, op, dst, src0, src1);
461
462 /* If we have to do relative addressing, we want to load the ARL
463 * reg directly for one of the regs, and preload the other reladdr
464 * sources into temps.
465 */
466 num_reladdr += dst.reladdr != NULL;
467 num_reladdr += src0.reladdr != NULL;
468 num_reladdr += src1.reladdr != NULL;
469 num_reladdr += src2.reladdr != NULL;
470
471 reladdr_to_temp(ir, &src2, &num_reladdr);
472 reladdr_to_temp(ir, &src1, &num_reladdr);
473 reladdr_to_temp(ir, &src0, &num_reladdr);
474
475 if (dst.reladdr) {
476 emit_arl(ir, address_reg, *dst.reladdr);
477 num_reladdr--;
478 }
479 assert(num_reladdr == 0);
480
481 inst->op = op;
482 inst->dst = dst;
483 inst->src[0] = src0;
484 inst->src[1] = src1;
485 inst->src[2] = src2;
486 inst->ir = ir;
487 inst->dead_mask = 0;
488
489 inst->function = NULL;
490
491 if (op == TGSI_OPCODE_ARL)
492 this->num_address_regs = 1;
493
494 /* Update indirect addressing status used by TGSI */
495 if (dst.reladdr) {
496 switch(dst.file) {
497 case PROGRAM_TEMPORARY:
498 this->indirect_addr_temps = true;
499 break;
500 case PROGRAM_LOCAL_PARAM:
501 case PROGRAM_ENV_PARAM:
502 case PROGRAM_STATE_VAR:
503 case PROGRAM_NAMED_PARAM:
504 case PROGRAM_CONSTANT:
505 case PROGRAM_UNIFORM:
506 this->indirect_addr_consts = true;
507 break;
508 default:
509 break;
510 }
511 }
512 else {
513 for (i=0; i<3; i++) {
514 if(inst->src[i].reladdr) {
515 switch(inst->src[i].file) {
516 case PROGRAM_TEMPORARY:
517 this->indirect_addr_temps = true;
518 break;
519 case PROGRAM_LOCAL_PARAM:
520 case PROGRAM_ENV_PARAM:
521 case PROGRAM_STATE_VAR:
522 case PROGRAM_NAMED_PARAM:
523 case PROGRAM_CONSTANT:
524 case PROGRAM_UNIFORM:
525 this->indirect_addr_consts = true;
526 break;
527 default:
528 break;
529 }
530 }
531 }
532 }
533
534 this->instructions.push_tail(inst);
535
536 return inst;
537 }
538
539
540 glsl_to_tgsi_instruction *
541 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
542 st_dst_reg dst, st_src_reg src0, st_src_reg src1)
543 {
544 return emit(ir, op, dst, src0, src1, undef_src);
545 }
546
547 glsl_to_tgsi_instruction *
548 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
549 st_dst_reg dst, st_src_reg src0)
550 {
551 assert(dst.writemask != 0);
552 return emit(ir, op, dst, src0, undef_src, undef_src);
553 }
554
555 glsl_to_tgsi_instruction *
556 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op)
557 {
558 return emit(ir, op, undef_dst, undef_src, undef_src, undef_src);
559 }
560
561 /**
562 * Determines whether to use an integer, unsigned integer, or float opcode
563 * based on the operands and input opcode, then emits the result.
564 *
565 * TODO: type checking for remaining TGSI opcodes
566 */
567 unsigned
568 glsl_to_tgsi_visitor::get_opcode(ir_instruction *ir, unsigned op,
569 st_dst_reg dst,
570 st_src_reg src0, st_src_reg src1)
571 {
572 int type = GLSL_TYPE_FLOAT;
573
574 if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
575 type = GLSL_TYPE_FLOAT;
576 else if (glsl_version >= 130)
577 type = src0.type;
578
579 #define case4(c, f, i, u) \
580 case TGSI_OPCODE_##c: \
581 if (type == GLSL_TYPE_INT) op = TGSI_OPCODE_##i; \
582 else if (type == GLSL_TYPE_UINT) op = TGSI_OPCODE_##u; \
583 else op = TGSI_OPCODE_##f; \
584 break;
585 #define case3(f, i, u) case4(f, f, i, u)
586 #define case2fi(f, i) case4(f, f, i, i)
587 #define case2iu(i, u) case4(i, LAST, i, u)
588
589 switch(op) {
590 case2fi(ADD, UADD);
591 case2fi(MUL, UMUL);
592 case2fi(MAD, UMAD);
593 case3(DIV, IDIV, UDIV);
594 case3(MAX, IMAX, UMAX);
595 case3(MIN, IMIN, UMIN);
596 case2iu(MOD, UMOD);
597
598 case2fi(SEQ, USEQ);
599 case2fi(SNE, USNE);
600 case3(SGE, ISGE, USGE);
601 case3(SLT, ISLT, USLT);
602
603 case2iu(SHL, SHL);
604 case2iu(ISHR, USHR);
605 case2iu(NOT, NOT);
606 case2iu(AND, AND);
607 case2iu(OR, OR);
608 case2iu(XOR, XOR);
609
610 default: break;
611 }
612
613 assert(op != TGSI_OPCODE_LAST);
614 return op;
615 }
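/* Illustrative sketch (placeholder operands, assuming glsl_version >= 130):
 *
 *    // both operands are signed ints, so ADD is promoted to its integer form
 *    get_opcode(ir, TGSI_OPCODE_ADD, dst, int_src, int_src);   // -> TGSI_OPCODE_UADD
 *    // any float operand forces the float opcode
 *    get_opcode(ir, TGSI_OPCODE_MAX, dst, float_src, int_src); // -> TGSI_OPCODE_MAX
 */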
616
617 void
618 glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
619 st_dst_reg dst, st_src_reg src0, st_src_reg src1,
620 unsigned elements)
621 {
622 static const unsigned dot_opcodes[] = {
623 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
624 };
625
626 emit(ir, dot_opcodes[elements - 2], dst, src0, src1);
627 }
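/* Illustrative note: elements == 2, 3 or 4 selects TGSI_OPCODE_DP2, DP3 or DP4
 * respectively, so emit_dp(ir, dst, a, b, 3) emits DP3 dst, a, b.
 */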
628
629 /**
630 * Emits TGSI scalar opcodes to produce unique answers across channels.
631 *
632 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X
633 * channel determines the result across all channels. So to do a vec4
634 * of this operation, we want to emit a scalar per source channel used
635 * to produce dest channels.
636 */
637 void
638 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
639 st_dst_reg dst,
640 st_src_reg orig_src0, st_src_reg orig_src1)
641 {
642 int i, j;
643 int done_mask = ~dst.writemask;
644
645 /* TGSI RCP is a scalar operation splatting results to all channels,
646 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
647 * dst channels.
648 */
649 for (i = 0; i < 4; i++) {
650 GLuint this_mask = (1 << i);
651 glsl_to_tgsi_instruction *inst;
652 st_src_reg src0 = orig_src0;
653 st_src_reg src1 = orig_src1;
654
655 if (done_mask & this_mask)
656 continue;
657
658 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
659 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
660 for (j = i + 1; j < 4; j++) {
661 /* If there is another enabled component in the destination that is
662 * derived from the same inputs, generate its value on this pass as
663 * well.
664 */
665 if (!(done_mask & (1 << j)) &&
666 GET_SWZ(src0.swizzle, j) == src0_swiz &&
667 GET_SWZ(src1.swizzle, j) == src1_swiz) {
668 this_mask |= (1 << j);
669 }
670 }
671 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
672 src0_swiz, src0_swiz);
673 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
674 src1_swiz, src1_swiz);
675
676 inst = emit(ir, op, dst, src0, src1);
677 inst->dst.writemask = this_mask;
678 done_mask |= this_mask;
679 }
680 }
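/* Illustrative sketch: a scalar op such as RCP with dst.writemask == WRITEMASK_XY
 * and a src swizzle of .xyzw expands to two instructions,
 *
 *    RCP dst.x, src.xxxx
 *    RCP dst.y, src.yyyy
 *
 * whereas a src swizzle of .xxzw folds both channels into a single
 * RCP dst.xy, src.xxxx because they read the same source component.
 */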
681
682 void
683 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
684 st_dst_reg dst, st_src_reg src0)
685 {
686 st_src_reg undef = undef_src;
687
688 undef.swizzle = SWIZZLE_XXXX;
689
690 emit_scalar(ir, op, dst, src0, undef);
691 }
692
693 void
694 glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
695 st_dst_reg dst, st_src_reg src0)
696 {
697 st_src_reg tmp = get_temp(glsl_type::float_type);
698
699 if (src0.type == GLSL_TYPE_INT)
700 emit(NULL, TGSI_OPCODE_I2F, st_dst_reg(tmp), src0);
701 else if (src0.type == GLSL_TYPE_UINT)
702 emit(NULL, TGSI_OPCODE_U2F, st_dst_reg(tmp), src0);
703 else
704 tmp = src0;
705
706 emit(NULL, TGSI_OPCODE_ARL, dst, tmp);
707 }
708
709 /**
710 * Emit a TGSI_OPCODE_SCS instruction
711 *
712 * The \c SCS opcode functions a bit differently than the other TGSI opcodes.
713 * Instead of splatting its result across all four components of the
714 * destination, it writes one value to the \c x component and another value to
715 * the \c y component.
716 *
717 * \param ir IR instruction being processed
718 * \param op Either \c TGSI_OPCODE_SIN or \c TGSI_OPCODE_COS depending
719 * on which value is desired.
720 * \param dst Destination register
721 * \param src Source register
722 */
723 void
724 glsl_to_tgsi_visitor::emit_scs(ir_instruction *ir, unsigned op,
725 st_dst_reg dst,
726 const st_src_reg &src)
727 {
728 /* Vertex programs cannot use the SCS opcode.
729 */
730 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) {
731 emit_scalar(ir, op, dst, src);
732 return;
733 }
734
735 const unsigned component = (op == TGSI_OPCODE_SIN) ? 0 : 1;
736 const unsigned scs_mask = (1U << component);
737 int done_mask = ~dst.writemask;
738 st_src_reg tmp;
739
740 assert(op == TGSI_OPCODE_SIN || op == TGSI_OPCODE_COS);
741
742 /* If there are components in the destination that differ from the component
743 * that will be written by the SCS instruction, we'll need a temporary.
744 */
745 if (scs_mask != unsigned(dst.writemask)) {
746 tmp = get_temp(glsl_type::vec4_type);
747 }
748
749 for (unsigned i = 0; i < 4; i++) {
750 unsigned this_mask = (1U << i);
751 st_src_reg src0 = src;
752
753 if ((done_mask & this_mask) != 0)
754 continue;
755
756 /* The source swizzle specifies which component of the source generates
757 * sine / cosine for the current component in the destination. The SCS
758 * instruction requires that this value be swizzled into the X component.
759 * Replace the current swizzle with a swizzle that puts the source in
760 * the X component.
761 */
762 unsigned src0_swiz = GET_SWZ(src.swizzle, i);
763
764 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
765 src0_swiz, src0_swiz);
766 for (unsigned j = i + 1; j < 4; j++) {
767 /* If there is another enabled component in the destination that is
768 * derived from the same inputs, generate its value on this pass as
769 * well.
770 */
771 if (!(done_mask & (1 << j)) &&
772 GET_SWZ(src0.swizzle, j) == src0_swiz) {
773 this_mask |= (1 << j);
774 }
775 }
776
777 if (this_mask != scs_mask) {
778 glsl_to_tgsi_instruction *inst;
779 st_dst_reg tmp_dst = st_dst_reg(tmp);
780
781 /* Emit the SCS instruction.
782 */
783 inst = emit(ir, TGSI_OPCODE_SCS, tmp_dst, src0);
784 inst->dst.writemask = scs_mask;
785
786 /* Move the result of the SCS instruction to the desired location in
787 * the destination.
788 */
789 tmp.swizzle = MAKE_SWIZZLE4(component, component,
790 component, component);
791 inst = emit(ir, TGSI_OPCODE_MOV, dst, tmp);
792 inst->dst.writemask = this_mask;
793 } else {
794 /* Emit the SCS instruction to write directly to the destination.
795 */
796 glsl_to_tgsi_instruction *inst = emit(ir, TGSI_OPCODE_SCS, dst, src0);
797 inst->dst.writemask = scs_mask;
798 }
799
800 done_mask |= this_mask;
801 }
802 }
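/* Illustrative sketch: sin() written only to dst.y (op == TGSI_OPCODE_SIN,
 * dst.writemask == WRITEMASK_Y) first emits SCS into the X channel of a
 * temporary and then copies tmp.xxxx into dst.y; a write that only touches
 * the channel SCS naturally produces (x for SIN, y for COS) needs just the
 * single SCS.
 */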
803
804 struct st_src_reg
805 glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
806 {
807 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_FLOAT);
808 union gl_constant_value uval;
809
810 uval.f = val;
811 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
812 &uval, 1, GL_FLOAT, &src.swizzle);
813
814 return src;
815 }
816
817 struct st_src_reg
818 glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
819 {
820 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_INT);
821 union gl_constant_value uval;
822
823 assert(glsl_version >= 130);
824
825 uval.i = val;
826 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
827 &uval, 1, GL_INT, &src.swizzle);
828
829 return src;
830 }
831
832 struct st_src_reg
833 glsl_to_tgsi_visitor::st_src_reg_for_type(int type, int val)
834 {
835 if (glsl_version >= 130)
836 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
837 st_src_reg_for_int(val);
838 else
839 return st_src_reg_for_float(val);
840 }
841
842 static int
843 type_size(const struct glsl_type *type)
844 {
845 unsigned int i;
846 int size;
847
848 switch (type->base_type) {
849 case GLSL_TYPE_UINT:
850 case GLSL_TYPE_INT:
851 case GLSL_TYPE_FLOAT:
852 case GLSL_TYPE_BOOL:
853 if (type->is_matrix()) {
854 return type->matrix_columns;
855 } else {
856 /* Regardless of size of vector, it gets a vec4. This is bad
857 * packing for things like floats, but otherwise arrays become a
858 * mess. Hopefully a later pass over the code can pack scalars
859 * down if appropriate.
860 */
861 return 1;
862 }
863 case GLSL_TYPE_ARRAY:
864 assert(type->length > 0);
865 return type_size(type->fields.array) * type->length;
866 case GLSL_TYPE_STRUCT:
867 size = 0;
868 for (i = 0; i < type->length; i++) {
869 size += type_size(type->fields.structure[i].type);
870 }
871 return size;
872 case GLSL_TYPE_SAMPLER:
873 /* Samplers take up one slot in UNIFORMS[], but they're baked in
874 * at link time.
875 */
876 return 1;
877 default:
878 assert(0);
879 return 0;
880 }
881 }
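/* Illustrative sketch: every scalar/vector occupies a whole vec4 slot, so
 *
 *    type_size(glsl_type::float_type) == 1
 *    a vec3 also takes 1 slot
 *    a mat3 takes 3 slots (one per column)
 *    an array of 8 floats takes 8 slots
 *    a struct takes the sum of its members' sizes
 */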
882
883 /**
884 * In the initial pass of codegen, we assign temporary numbers to
885 * intermediate results. (not SSA -- variable assignments will reuse
886 * storage).
887 */
888 st_src_reg
889 glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
890 {
891 st_src_reg src;
892 int swizzle[4];
893 int i;
894
895 src.type = glsl_version >= 130 ? type->base_type : GLSL_TYPE_FLOAT;
896 src.file = PROGRAM_TEMPORARY;
897 src.index = next_temp;
898 src.reladdr = NULL;
899 next_temp += type_size(type);
900
901 if (type->is_array() || type->is_record()) {
902 src.swizzle = SWIZZLE_NOOP;
903 } else {
904 for (i = 0; i < type->vector_elements; i++)
905 swizzle[i] = i;
906 for (; i < 4; i++)
907 swizzle[i] = type->vector_elements - 1;
908 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
909 swizzle[2], swizzle[3]);
910 }
911 src.negate = 0;
912
913 return src;
914 }
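/* Illustrative sketch: a vec2 temporary comes back with swizzle
 * MAKE_SWIZZLE4(0, 1, 1, 1) (the trailing channel replicated into .z/.w),
 * and next_temp advances by type_size(type): one slot here, several for
 * arrays, structs and matrices.
 */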
915
916 variable_storage *
917 glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
918 {
919
920 variable_storage *entry;
921
922 foreach_iter(exec_list_iterator, iter, this->variables) {
923 entry = (variable_storage *)iter.get();
924
925 if (entry->var == var)
926 return entry;
927 }
928
929 return NULL;
930 }
931
932 void
933 glsl_to_tgsi_visitor::visit(ir_variable *ir)
934 {
935 if (strcmp(ir->name, "gl_FragCoord") == 0) {
936 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
937
938 fp->OriginUpperLeft = ir->origin_upper_left;
939 fp->PixelCenterInteger = ir->pixel_center_integer;
940
941 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
942 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
943 switch (ir->depth_layout) {
944 case ir_depth_layout_none:
945 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
946 break;
947 case ir_depth_layout_any:
948 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
949 break;
950 case ir_depth_layout_greater:
951 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
952 break;
953 case ir_depth_layout_less:
954 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
955 break;
956 case ir_depth_layout_unchanged:
957 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
958 break;
959 default:
960 assert(0);
961 break;
962 }
963 }
964
965 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
966 unsigned int i;
967 const ir_state_slot *const slots = ir->state_slots;
968 assert(ir->state_slots != NULL);
969
970 /* Check if this statevar's setup in the STATE file exactly
971 * matches how we'll want to reference it as a
972 * struct/array/whatever. If not, then we need to move it into
973 * temporary storage and hope that it'll get copy-propagated
974 * out.
975 */
976 for (i = 0; i < ir->num_state_slots; i++) {
977 if (slots[i].swizzle != SWIZZLE_XYZW) {
978 break;
979 }
980 }
981
982 struct variable_storage *storage;
983 st_dst_reg dst;
984 if (i == ir->num_state_slots) {
985 /* We'll set the index later. */
986 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
987 this->variables.push_tail(storage);
988
989 dst = undef_dst;
990 } else {
991 /* The variable_storage constructor allocates slots based on the size
992 * of the type. However, this had better match the number of state
993 * elements that we're going to copy into the new temporary.
994 */
995 assert((int) ir->num_state_slots == type_size(ir->type));
996
997 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY,
998 this->next_temp);
999 this->variables.push_tail(storage);
1000 this->next_temp += type_size(ir->type);
1001
1002 dst = st_dst_reg(st_src_reg(PROGRAM_TEMPORARY, storage->index,
1003 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT));
1004 }
1005
1006
1007 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
1008 int index = _mesa_add_state_reference(this->prog->Parameters,
1009 (gl_state_index *)slots[i].tokens);
1010
1011 if (storage->file == PROGRAM_STATE_VAR) {
1012 if (storage->index == -1) {
1013 storage->index = index;
1014 } else {
1015 assert(index == storage->index + (int)i);
1016 }
1017 } else {
1018 st_src_reg src(PROGRAM_STATE_VAR, index,
1019 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT);
1020 src.swizzle = slots[i].swizzle;
1021 emit(ir, TGSI_OPCODE_MOV, dst, src);
1022 /* even a float takes up a whole vec4 reg in a struct/array. */
1023 dst.index++;
1024 }
1025 }
1026
1027 if (storage->file == PROGRAM_TEMPORARY &&
1028 dst.index != storage->index + (int) ir->num_state_slots) {
1029 fail_link(this->shader_program,
1030 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
1031 ir->name, dst.index - storage->index,
1032 type_size(ir->type));
1033 }
1034 }
1035 }
1036
1037 void
1038 glsl_to_tgsi_visitor::visit(ir_loop *ir)
1039 {
1040 ir_dereference_variable *counter = NULL;
1041
1042 if (ir->counter != NULL)
1043 counter = new(ir) ir_dereference_variable(ir->counter);
1044
1045 if (ir->from != NULL) {
1046 assert(ir->counter != NULL);
1047
1048 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL);
1049
1050 a->accept(this);
1051 delete a;
1052 }
1053
1054 emit(NULL, TGSI_OPCODE_BGNLOOP);
1055
1056 if (ir->to) {
1057 ir_expression *e =
1058 new(ir) ir_expression(ir->cmp, glsl_type::bool_type,
1059 counter, ir->to);
1060 ir_if *if_stmt = new(ir) ir_if(e);
1061
1062 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
1063
1064 if_stmt->then_instructions.push_tail(brk);
1065
1066 if_stmt->accept(this);
1067
1068 delete if_stmt;
1069 delete e;
1070 delete brk;
1071 }
1072
1073 visit_exec_list(&ir->body_instructions, this);
1074
1075 if (ir->increment) {
1076 ir_expression *e =
1077 new(ir) ir_expression(ir_binop_add, counter->type,
1078 counter, ir->increment);
1079
1080 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL);
1081
1082 a->accept(this);
1083 delete a;
1084 delete e;
1085 }
1086
1087 emit(NULL, TGSI_OPCODE_ENDLOOP);
1088 }
1089
1090 void
1091 glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
1092 {
1093 switch (ir->mode) {
1094 case ir_loop_jump::jump_break:
1095 emit(NULL, TGSI_OPCODE_BRK);
1096 break;
1097 case ir_loop_jump::jump_continue:
1098 emit(NULL, TGSI_OPCODE_CONT);
1099 break;
1100 }
1101 }
1102
1103
1104 void
1105 glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
1106 {
1107 assert(0);
1108 (void)ir;
1109 }
1110
1111 void
1112 glsl_to_tgsi_visitor::visit(ir_function *ir)
1113 {
1114 /* Ignore function bodies other than main() -- we shouldn't see calls to
1115 * them since they should all be inlined before we get to glsl_to_tgsi.
1116 */
1117 if (strcmp(ir->name, "main") == 0) {
1118 const ir_function_signature *sig;
1119 exec_list empty;
1120
1121 sig = ir->matching_signature(&empty);
1122
1123 assert(sig);
1124
1125 foreach_iter(exec_list_iterator, iter, sig->body) {
1126 ir_instruction *ir = (ir_instruction *)iter.get();
1127
1128 ir->accept(this);
1129 }
1130 }
1131 }
1132
1133 GLboolean
1134 glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
1135 {
1136 int nonmul_operand = 1 - mul_operand;
1137 st_src_reg a, b, c;
1138 st_dst_reg result_dst;
1139
1140 ir_expression *expr = ir->operands[mul_operand]->as_expression();
1141 if (!expr || expr->operation != ir_binop_mul)
1142 return false;
1143
1144 expr->operands[0]->accept(this);
1145 a = this->result;
1146 expr->operands[1]->accept(this);
1147 b = this->result;
1148 ir->operands[nonmul_operand]->accept(this);
1149 c = this->result;
1150
1151 this->result = get_temp(ir->type);
1152 result_dst = st_dst_reg(this->result);
1153 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1154 emit(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);
1155
1156 return true;
1157 }
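/* Illustrative sketch: for an expression such as a * b + c (or c + a * b),
 * this peephole collapses the MUL/ADD pair into a single
 *
 *    MAD result, a, b, c
 *
 * with the writemask trimmed to the vector width of the result type.
 */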
1158
1159 GLboolean
1160 glsl_to_tgsi_visitor::try_emit_sat(ir_expression *ir)
1161 {
1162 /* Saturates were only introduced to vertex programs in
1163 * NV_vertex_program3, so don't give them to drivers in the VP.
1164 */
1165 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
1166 return false;
1167
1168 ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
1169 if (!sat_src)
1170 return false;
1171
1172 sat_src->accept(this);
1173 st_src_reg src = this->result;
1174
1175 this->result = get_temp(ir->type);
1176 st_dst_reg result_dst = st_dst_reg(this->result);
1177 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1178 glsl_to_tgsi_instruction *inst;
1179 inst = emit(ir, TGSI_OPCODE_MOV, result_dst, src);
1180 inst->saturate = true;
1181
1182 return true;
1183 }
1184
1185 void
1186 glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
1187 st_src_reg *reg, int *num_reladdr)
1188 {
1189 if (!reg->reladdr)
1190 return;
1191
1192 emit_arl(ir, address_reg, *reg->reladdr);
1193
1194 if (*num_reladdr != 1) {
1195 st_src_reg temp = get_temp(glsl_type::vec4_type);
1196
1197 emit(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
1198 *reg = temp;
1199 }
1200
1201 (*num_reladdr)--;
1202 }
1203
1204 void
1205 glsl_to_tgsi_visitor::visit(ir_expression *ir)
1206 {
1207 unsigned int operand;
1208 st_src_reg op[Elements(ir->operands)];
1209 st_src_reg result_src;
1210 st_dst_reg result_dst;
1211
1212 /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
1213 */
1214 if (ir->operation == ir_binop_add) {
1215 if (try_emit_mad(ir, 1))
1216 return;
1217 if (try_emit_mad(ir, 0))
1218 return;
1219 }
1220 if (try_emit_sat(ir))
1221 return;
1222
1223 if (ir->operation == ir_quadop_vector)
1224 assert(!"ir_quadop_vector should have been lowered");
1225
1226 for (operand = 0; operand < ir->get_num_operands(); operand++) {
1227 this->result.file = PROGRAM_UNDEFINED;
1228 ir->operands[operand]->accept(this);
1229 if (this->result.file == PROGRAM_UNDEFINED) {
1230 ir_print_visitor v;
1231 printf("Failed to get tree for expression operand:\n");
1232 ir->operands[operand]->accept(&v);
1233 exit(1);
1234 }
1235 op[operand] = this->result;
1236
1237 /* Matrix expression operands should have been broken down to vector
1238 * operations already.
1239 */
1240 assert(!ir->operands[operand]->type->is_matrix());
1241 }
1242
1243 int vector_elements = ir->operands[0]->type->vector_elements;
1244 if (ir->operands[1]) {
1245 vector_elements = MAX2(vector_elements,
1246 ir->operands[1]->type->vector_elements);
1247 }
1248
1249 this->result.file = PROGRAM_UNDEFINED;
1250
1251 /* Storage for our result. Ideally for an assignment we'd be using
1252 * the actual storage for the result here, instead.
1253 */
1254 result_src = get_temp(ir->type);
1255 /* convenience for the emit functions below. */
1256 result_dst = st_dst_reg(result_src);
1257 /* Limit writes to the channels that will be used by result_src later.
1258 * This does limit this temp's use as a temporary for multi-instruction
1259 * sequences.
1260 */
1261 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1262
1263 switch (ir->operation) {
1264 case ir_unop_logic_not:
1265 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], st_src_reg_for_type(result_dst.type, 0));
1266 break;
1267 case ir_unop_neg:
1268 assert(result_dst.type == GLSL_TYPE_FLOAT || result_dst.type == GLSL_TYPE_INT);
1269 if (result_dst.type == GLSL_TYPE_INT)
1270 emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
1271 else {
1272 op[0].negate = ~op[0].negate;
1273 result_src = op[0];
1274 }
1275 break;
1276 case ir_unop_abs:
1277 assert(result_dst.type == GLSL_TYPE_FLOAT);
1278 emit(ir, TGSI_OPCODE_ABS, result_dst, op[0]);
1279 break;
1280 case ir_unop_sign:
1281 emit(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
1282 break;
1283 case ir_unop_rcp:
1284 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
1285 break;
1286
1287 case ir_unop_exp2:
1288 emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
1289 break;
1290 case ir_unop_exp:
1291 case ir_unop_log:
1292 assert(!"not reached: should be handled by ir_explog_to_explog2");
1293 break;
1294 case ir_unop_log2:
1295 emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
1296 break;
1297 case ir_unop_sin:
1298 emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1299 break;
1300 case ir_unop_cos:
1301 emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1302 break;
1303 case ir_unop_sin_reduced:
1304 emit_scs(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1305 break;
1306 case ir_unop_cos_reduced:
1307 emit_scs(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1308 break;
1309
1310 case ir_unop_dFdx:
1311 emit(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
1312 break;
1313 case ir_unop_dFdy:
1314 op[0].negate = ~op[0].negate;
1315 emit(ir, TGSI_OPCODE_DDY, result_dst, op[0]);
1316 break;
1317
1318 case ir_unop_noise: {
1319 /* At some point, a motivated person could add a better
1320 * implementation of noise. Currently not even the nvidia
1321 * binary drivers do anything more than this. In any case, the
1322 * place to do this is in the GL state tracker, not the poor
1323 * driver.
1324 */
1325 emit(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
1326 break;
1327 }
1328
1329 case ir_binop_add:
1330 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1331 break;
1332 case ir_binop_sub:
1333 emit(ir, TGSI_OPCODE_SUB, result_dst, op[0], op[1]);
1334 break;
1335
1336 case ir_binop_mul:
1337 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1338 break;
1339 case ir_binop_div:
1340 if (result_dst.type == GLSL_TYPE_FLOAT)
1341 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
1342 else
1343 emit(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
1344 break;
1345 case ir_binop_mod:
1346 if (result_dst.type == GLSL_TYPE_FLOAT)
1347 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1348 else
1349 emit(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
1350 break;
1351
1352 case ir_binop_less:
1353 emit(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
1354 break;
1355 case ir_binop_greater:
1356 emit(ir, TGSI_OPCODE_SGT, result_dst, op[0], op[1]);
1357 break;
1358 case ir_binop_lequal:
1359 emit(ir, TGSI_OPCODE_SLE, result_dst, op[0], op[1]);
1360 break;
1361 case ir_binop_gequal:
1362 emit(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
1363 break;
1364 case ir_binop_equal:
1365 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1366 break;
1367 case ir_binop_nequal:
1368 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1369 break;
1370 case ir_binop_all_equal:
1371 /* "==" operator producing a scalar boolean. */
1372 if (ir->operands[0]->type->is_vector() ||
1373 ir->operands[1]->type->is_vector()) {
1374 st_src_reg temp = get_temp(glsl_version >= 130 ?
1375 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1376 glsl_type::vec4_type);
1377 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1378 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1379 emit_dp(ir, result_dst, temp, temp, vector_elements);
1380 emit(ir, TGSI_OPCODE_SEQ, result_dst, result_src, st_src_reg_for_float(0.0));
1381 } else {
1382 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1383 }
1384 break;
1385 case ir_binop_any_nequal:
1386 /* "!=" operator producing a scalar boolean. */
1387 if (ir->operands[0]->type->is_vector() ||
1388 ir->operands[1]->type->is_vector()) {
1389 st_src_reg temp = get_temp(glsl_version >= 130 ?
1390 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1391 glsl_type::vec4_type);
1392 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1393 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1394 emit_dp(ir, result_dst, temp, temp, vector_elements);
1395 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1396 } else {
1397 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1398 }
1399 break;
1400
1401 case ir_unop_any:
1402 assert(ir->operands[0]->type->is_vector());
1403 emit_dp(ir, result_dst, op[0], op[0],
1404 ir->operands[0]->type->vector_elements);
1405 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1406 break;
1407
1408 case ir_binop_logic_xor:
1409 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1410 break;
1411
1412 case ir_binop_logic_or:
1413 /* This could be a saturated add and skip the SNE. */
1414 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1415 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1416 break;
1417
1418 case ir_binop_logic_and:
1419 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
1420 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1421 break;
1422
1423 case ir_binop_dot:
1424 assert(ir->operands[0]->type->is_vector());
1425 assert(ir->operands[0]->type == ir->operands[1]->type);
1426 emit_dp(ir, result_dst, op[0], op[1],
1427 ir->operands[0]->type->vector_elements);
1428 break;
1429
1430 case ir_unop_sqrt:
1431 /* sqrt(x) = x * rsq(x). */
1432 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1433 emit(ir, TGSI_OPCODE_MUL, result_dst, result_src, op[0]);
1434 /* For incoming channels <= 0, set the result to 0. */
1435 op[0].negate = ~op[0].negate;
1436 emit(ir, TGSI_OPCODE_CMP, result_dst,
1437 op[0], result_src, st_src_reg_for_float(0.0));
1438 break;
1439 case ir_unop_rsq:
1440 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1441 break;
1442 case ir_unop_i2f:
1443 case ir_unop_b2f:
1444 if (glsl_version >= 130) {
1445 emit(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
1446 break;
1447 }
1448 case ir_unop_b2i:
1449 /* Booleans are stored as integers (or floats in GLSL 1.20 and lower). */
1450 result_src = op[0];
1451 break;
1452 case ir_unop_f2i:
1453 if (glsl_version >= 130)
1454 emit(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
1455 else
1456 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1457 break;
1458 case ir_unop_f2b:
1459 case ir_unop_i2b:
1460 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1461 st_src_reg_for_type(result_dst.type, 0));
1462 break;
1463 case ir_unop_trunc:
1464 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1465 break;
1466 case ir_unop_ceil:
1467 op[0].negate = ~op[0].negate;
1468 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1469 result_src.negate = ~result_src.negate;
1470 break;
1471 case ir_unop_floor:
1472 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1473 break;
1474 case ir_unop_fract:
1475 emit(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
1476 break;
1477
1478 case ir_binop_min:
1479 emit(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
1480 break;
1481 case ir_binop_max:
1482 emit(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
1483 break;
1484 case ir_binop_pow:
1485 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
1486 break;
1487
1488 case ir_unop_bit_not:
1489 if (glsl_version >= 130) {
1490 emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1491 break;
1492 }
1493 case ir_unop_u2f:
1494 if (glsl_version >= 130) {
1495 emit(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
1496 break;
1497 }
1498 case ir_binop_lshift:
1499 if (glsl_version >= 130) {
1500 emit(ir, TGSI_OPCODE_SHL, result_dst, op[0], op[1]);
1501 break;
1502 }
1503 case ir_binop_rshift:
1504 if (glsl_version >= 130) {
1505 emit(ir, TGSI_OPCODE_ISHR, result_dst, op[0], op[1]);
1506 break;
1507 }
1508 case ir_binop_bit_and:
1509 if (glsl_version >= 130) {
1510 emit(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
1511 break;
1512 }
1513 case ir_binop_bit_xor:
1514 if (glsl_version >= 130) {
1515 emit(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1516 break;
1517 }
1518 case ir_binop_bit_or:
1519 if (glsl_version >= 130) {
1520 emit(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1521 break;
1522 }
1523 case ir_unop_round_even:
1524 assert(!"GLSL 1.30 features unsupported");
1525 break;
1526
1527 case ir_quadop_vector:
1528 /* This operation should have already been handled.
1529 */
1530 assert(!"Should not get here.");
1531 break;
1532 }
1533
1534 this->result = result_src;
1535 }
1536
1537
1538 void
1539 glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
1540 {
1541 st_src_reg src;
1542 int i;
1543 int swizzle[4];
1544
1545 /* Note that this is only swizzles in expressions, not those on the left
1546 * hand side of an assignment, which do write masking. See ir_assignment
1547 * for that.
1548 */
1549
1550 ir->val->accept(this);
1551 src = this->result;
1552 assert(src.file != PROGRAM_UNDEFINED);
1553
1554 for (i = 0; i < 4; i++) {
1555 if (i < ir->type->vector_elements) {
1556 switch (i) {
1557 case 0:
1558 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
1559 break;
1560 case 1:
1561 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
1562 break;
1563 case 2:
1564 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
1565 break;
1566 case 3:
1567 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
1568 break;
1569 }
1570 } else {
1571 /* If the type is smaller than a vec4, replicate the last
1572 * channel out.
1573 */
1574 swizzle[i] = swizzle[ir->type->vector_elements - 1];
1575 }
1576 }
1577
1578 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1579
1580 this->result = src;
1581 }
1582
1583 void
1584 glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
1585 {
1586 variable_storage *entry = find_variable_storage(ir->var);
1587 ir_variable *var = ir->var;
1588
1589 if (!entry) {
1590 switch (var->mode) {
1591 case ir_var_uniform:
1592 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
1593 var->location);
1594 this->variables.push_tail(entry);
1595 break;
1596 case ir_var_in:
1597 case ir_var_inout:
1598 /* The linker assigns locations for varyings and attributes,
1599 * including deprecated builtins (like gl_Color), user-assigned
1600 * generic attributes (glBindAttribLocation), and
1601 * user-defined varyings.
1602 *
1603 * FINISHME: We would hit this path for function arguments. Fix!
1604 */
1605 assert(var->location != -1);
1606 entry = new(mem_ctx) variable_storage(var,
1607 PROGRAM_INPUT,
1608 var->location);
1609 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1610 var->location >= VERT_ATTRIB_GENERIC0) {
1611 _mesa_add_attribute(this->prog->Attributes,
1612 var->name,
1613 _mesa_sizeof_glsl_type(var->type->gl_type),
1614 var->type->gl_type,
1615 var->location - VERT_ATTRIB_GENERIC0);
1616 }
1617 break;
1618 case ir_var_out:
1619 assert(var->location != -1);
1620 entry = new(mem_ctx) variable_storage(var,
1621 PROGRAM_OUTPUT,
1622 var->location);
1623 break;
1624 case ir_var_system_value:
1625 entry = new(mem_ctx) variable_storage(var,
1626 PROGRAM_SYSTEM_VALUE,
1627 var->location);
1628 break;
1629 case ir_var_auto:
1630 case ir_var_temporary:
1631 entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY,
1632 this->next_temp);
1633 this->variables.push_tail(entry);
1634
1635 next_temp += type_size(var->type);
1636 break;
1637 }
1638
1639 if (!entry) {
1640 printf("Failed to make storage for %s\n", var->name);
1641 exit(1);
1642 }
1643 }
1644
1645 this->result = st_src_reg(entry->file, entry->index, var->type);
1646 if (glsl_version <= 120)
1647 this->result.type = GLSL_TYPE_FLOAT;
1648 }
1649
1650 void
1651 glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
1652 {
1653 ir_constant *index;
1654 st_src_reg src;
1655 int element_size = type_size(ir->type);
1656
1657 index = ir->array_index->constant_expression_value();
1658
1659 ir->array->accept(this);
1660 src = this->result;
1661
1662 if (index) {
1663 src.index += index->value.i[0] * element_size;
1664 } else {
1665 st_src_reg array_base = this->result;
1666 /* Variable index array dereference. It eats the "vec4" of the
1667 * base of the array and an index that offsets the Mesa register
1668 * index.
1669 */
1670 ir->array_index->accept(this);
1671
1672 st_src_reg index_reg;
1673
1674 if (element_size == 1) {
1675 index_reg = this->result;
1676 } else {
1677 index_reg = get_temp(glsl_type::float_type);
1678
1679 emit(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
1680 this->result, st_src_reg_for_float(element_size));
1681 }
1682
1683 src.reladdr = ralloc(mem_ctx, st_src_reg);
1684 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
1685 }
1686
1687 /* If the type is smaller than a vec4, replicate the last channel out. */
1688 if (ir->type->is_scalar() || ir->type->is_vector())
1689 src.swizzle = swizzle_for_size(ir->type->vector_elements);
1690 else
1691 src.swizzle = SWIZZLE_NOOP;
1692
1693 this->result = src;
1694 }
1695
1696 void
1697 glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
1698 {
1699 unsigned int i;
1700 const glsl_type *struct_type = ir->record->type;
1701 int offset = 0;
1702
1703 ir->record->accept(this);
1704
1705 for (i = 0; i < struct_type->length; i++) {
1706 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1707 break;
1708 offset += type_size(struct_type->fields.structure[i].type);
1709 }
1710
1711 /* If the type is smaller than a vec4, replicate the last channel out. */
1712 if (ir->type->is_scalar() || ir->type->is_vector())
1713 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1714 else
1715 this->result.swizzle = SWIZZLE_NOOP;
1716
1717 this->result.index += offset;
1718 }
1719
1720 /**
1721 * We want to be careful in assignment setup to hit the actual storage
1722 * instead of potentially using a temporary like we might with the
1723 * ir_dereference handler.
1724 */
1725 static st_dst_reg
1726 get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v)
1727 {
1728 /* The LHS must be a dereference. If the LHS is a variable indexed array
1729 * access of a vector, it must be separated into a series of conditional moves
1730 * before reaching this point (see ir_vec_index_to_cond_assign).
1731 */
1732 assert(ir->as_dereference());
1733 ir_dereference_array *deref_array = ir->as_dereference_array();
1734 if (deref_array) {
1735 assert(!deref_array->array->type->is_vector());
1736 }
1737
1738 /* Use the rvalue deref handler for the most part. We'll ignore
1739 * swizzles in it and write swizzles using writemask, though.
1740 */
1741 ir->accept(v);
1742 return st_dst_reg(v->result);
1743 }
1744
1745 /**
1746 * Process the condition of a conditional assignment
1747 *
1748 * Examines the condition of a conditional assignment to generate the optimal
1749 * first operand of a \c CMP instruction. If the condition is a relational
1750 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
1751 * used as the source for the \c CMP instruction. Otherwise the comparison
1752 * is processed to a boolean result, and the boolean result is used as the
1753 * operand to the CMP instruction.
1754 */
1755 bool
1756 glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
1757 {
1758 ir_rvalue *src_ir = ir;
1759 bool negate = true;
1760 bool switch_order = false;
1761
1762 ir_expression *const expr = ir->as_expression();
1763 if ((expr != NULL) && (expr->get_num_operands() == 2)) {
1764 bool zero_on_left = false;
1765
1766 if (expr->operands[0]->is_zero()) {
1767 src_ir = expr->operands[1];
1768 zero_on_left = true;
1769 } else if (expr->operands[1]->is_zero()) {
1770 src_ir = expr->operands[0];
1771 zero_on_left = false;
1772 }
1773
1774 /* a is - 0 + - 0 +
1775 * (a < 0) T F F ( a < 0) T F F
1776 * (0 < a) F F T (-a < 0) F F T
1777 * (a <= 0) T T F (-a < 0) F F T (swap order of other operands)
1778 * (0 <= a) F T T ( a < 0) T F F (swap order of other operands)
1779 * (a > 0) F F T (-a < 0) F F T
1780 * (0 > a) T F F ( a < 0) T F F
1781 * (a >= 0) F T T ( a < 0) T F F (swap order of other operands)
1782 * (0 >= a) T T F (-a < 0) F F T (swap order of other operands)
1783 *
1784 * Note that exchanging the order of 0 and 'a' in the comparison simply
1785 * means that the value of 'a' should be negated.
1786 */
1787 if (src_ir != ir) {
1788 switch (expr->operation) {
1789 case ir_binop_less:
1790 switch_order = false;
1791 negate = zero_on_left;
1792 break;
1793
1794 case ir_binop_greater:
1795 switch_order = false;
1796 negate = !zero_on_left;
1797 break;
1798
1799 case ir_binop_lequal:
1800 switch_order = true;
1801 negate = !zero_on_left;
1802 break;
1803
1804 case ir_binop_gequal:
1805 switch_order = true;
1806 negate = zero_on_left;
1807 break;
1808
1809 default:
1810 /* This isn't the right kind of comparison after all, so make sure
1811 * the whole condition is visited.
1812 */
1813 src_ir = ir;
1814 break;
1815 }
1816 }
1817 }
1818
1819 src_ir->accept(this);
1820
1821 /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
1822 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
1823 * choose which value TGSI_OPCODE_CMP produces without an extra instruction
1824 * computing the condition.
1825 */
1826 if (negate)
1827 this->result.negate = ~this->result.negate;
1828
1829 return switch_order;
1830 }
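/* Illustrative sketch: for an assignment guarded by (a >= 0.0), the table
 * above yields switch_order == true and negate == false, so the caller
 * (visit(ir_assignment)) can emit
 *
 *    CMP dst, a, old_dst, rhs     // dst = (a < 0.0) ? old_dst : rhs
 *
 * without a separate SGE/SLT instruction to materialize the condition.
 */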
1831
1832 void
1833 glsl_to_tgsi_visitor::visit(ir_assignment *ir)
1834 {
1835 st_dst_reg l;
1836 st_src_reg r;
1837 int i;
1838
1839 ir->rhs->accept(this);
1840 r = this->result;
1841
1842 l = get_assignment_lhs(ir->lhs, this);
1843
1844 /* FINISHME: This should really set to the correct maximal writemask for each
1845 * FINISHME: component written (in the loops below). This case can only
1846 * FINISHME: occur for matrices, arrays, and structures.
1847 */
1848 if (ir->write_mask == 0) {
1849 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
1850 l.writemask = WRITEMASK_XYZW;
1851 } else if (ir->lhs->type->is_scalar() &&
1852 ir->lhs->variable_referenced()->mode == ir_var_out) {
1853 /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
1854 * FINISHME: W component of fragment shader output zero, work correctly.
1855 */
1856 l.writemask = WRITEMASK_XYZW;
1857 } else {
1858 int swizzles[4];
1859 int first_enabled_chan = 0;
1860 int rhs_chan = 0;
1861
1862 l.writemask = ir->write_mask;
1863
1864 for (int i = 0; i < 4; i++) {
1865 if (l.writemask & (1 << i)) {
1866 first_enabled_chan = GET_SWZ(r.swizzle, i);
1867 break;
1868 }
1869 }
1870
1871 /* Swizzle a small RHS vector into the channels being written.
1872 *
1873 * glsl ir treats write_mask as dictating how many channels are
1874 * present on the RHS while Mesa IR treats write_mask as just
1875 * showing which channels of the vec4 RHS get written.
1876 */
1877 for (int i = 0; i < 4; i++) {
1878 if (l.writemask & (1 << i))
1879 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
1880 else
1881 swizzles[i] = first_enabled_chan;
1882 }
1883 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
1884 swizzles[2], swizzles[3]);
1885 }
1886
1887 assert(l.file != PROGRAM_UNDEFINED);
1888 assert(r.file != PROGRAM_UNDEFINED);
1889
1890 if (ir->condition) {
1891 const bool switch_order = this->process_move_condition(ir->condition);
1892 st_src_reg condition = this->result;
1893
1894 for (i = 0; i < type_size(ir->lhs->type); i++) {
1895 st_src_reg l_src = st_src_reg(l);
1896 l_src.swizzle = swizzle_for_size(ir->lhs->type->vector_elements);
1897
1898 if (switch_order) {
1899 emit(ir, TGSI_OPCODE_CMP, l, condition, l_src, r);
1900 } else {
1901 emit(ir, TGSI_OPCODE_CMP, l, condition, r, l_src);
1902 }
1903
1904 l.index++;
1905 r.index++;
1906 }
1907 } else if (ir->rhs->as_expression() &&
1908 this->instructions.get_tail() &&
1909 ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir &&
1910 type_size(ir->lhs->type) == 1) {
1911 /* To avoid emitting an extra MOV when assigning an expression to a
1912 * variable, change the destination register of the last instruction
1913 * emitted as part of the expression to the assignment variable.
1914 */
1915 glsl_to_tgsi_instruction *inst;
1916 inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
1917 inst->dst = l;
1918 } else {
1919 for (i = 0; i < type_size(ir->lhs->type); i++) {
1920 emit(ir, TGSI_OPCODE_MOV, l, r);
1921 l.index++;
1922 r.index++;
1923 }
1924 }
1925 }
1926
1927
1928 void
1929 glsl_to_tgsi_visitor::visit(ir_constant *ir)
1930 {
1931 st_src_reg src;
1932 GLfloat stack_vals[4] = { 0 };
1933 gl_constant_value *values = (gl_constant_value *) stack_vals;
1934 GLenum gl_type = GL_NONE;
1935 unsigned int i;
1936
1937 /* Unfortunately, 4 floats is all we can get into
1938 * _mesa_add_unnamed_constant. So, make a temp to store an
1939 * aggregate constant and move each constant value into it. If we
1940 * get lucky, copy propagation will eliminate the extra moves.
1941 */
1942 if (ir->type->base_type == GLSL_TYPE_STRUCT) {
1943 st_src_reg temp_base = get_temp(ir->type);
1944 st_dst_reg temp = st_dst_reg(temp_base);
1945
1946 foreach_iter(exec_list_iterator, iter, ir->components) {
1947 ir_constant *field_value = (ir_constant *)iter.get();
1948 int size = type_size(field_value->type);
1949
1950 assert(size > 0);
1951
1952 field_value->accept(this);
1953 src = this->result;
1954
1955 for (i = 0; i < (unsigned int)size; i++) {
1956 emit(ir, TGSI_OPCODE_MOV, temp, src);
1957
1958 src.index++;
1959 temp.index++;
1960 }
1961 }
1962 this->result = temp_base;
1963 return;
1964 }
1965
1966 if (ir->type->is_array()) {
1967 st_src_reg temp_base = get_temp(ir->type);
1968 st_dst_reg temp = st_dst_reg(temp_base);
1969 int size = type_size(ir->type->fields.array);
1970
1971 assert(size > 0);
1972
1973 for (i = 0; i < ir->type->length; i++) {
1974 ir->array_elements[i]->accept(this);
1975 src = this->result;
1976 for (int j = 0; j < size; j++) {
1977 emit(ir, TGSI_OPCODE_MOV, temp, src);
1978
1979 src.index++;
1980 temp.index++;
1981 }
1982 }
1983 this->result = temp_base;
1984 return;
1985 }
1986
1987 if (ir->type->is_matrix()) {
1988 st_src_reg mat = get_temp(ir->type);
1989 st_dst_reg mat_column = st_dst_reg(mat);
1990
1991 for (i = 0; i < ir->type->matrix_columns; i++) {
1992 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1993 values = (gl_constant_value *) &ir->value.f[i * ir->type->vector_elements];
1994
1995 src = st_src_reg(PROGRAM_CONSTANT, -1, ir->type->base_type);
1996 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
1997 values,
1998 ir->type->vector_elements,
1999 GL_FLOAT,
2000 &src.swizzle);
2001 emit(ir, TGSI_OPCODE_MOV, mat_column, src);
2002
2003 mat_column.index++;
2004 }
2005
2006 this->result = mat;
2007 return;
2008 }
2009
2010 src.file = PROGRAM_CONSTANT;
2011 switch (ir->type->base_type) {
2012 case GLSL_TYPE_FLOAT:
2013 gl_type = GL_FLOAT;
2014 for (i = 0; i < ir->type->vector_elements; i++) {
2015 values[i].f = ir->value.f[i];
2016 }
2017 break;
2018 case GLSL_TYPE_UINT:
2019 gl_type = glsl_version >= 130 ? GL_UNSIGNED_INT : GL_FLOAT;
2020 for (i = 0; i < ir->type->vector_elements; i++) {
2021 if (glsl_version >= 130)
2022 values[i].u = ir->value.u[i];
2023 else
2024 values[i].f = ir->value.u[i];
2025 }
2026 break;
2027 case GLSL_TYPE_INT:
2028 gl_type = glsl_version >= 130 ? GL_INT : GL_FLOAT;
2029 for (i = 0; i < ir->type->vector_elements; i++) {
2030 if (glsl_version >= 130)
2031 values[i].i = ir->value.i[i];
2032 else
2033 values[i].f = ir->value.i[i];
2034 }
2035 break;
2036 case GLSL_TYPE_BOOL:
2037 gl_type = glsl_version >= 130 ? GL_BOOL : GL_FLOAT;
2038 for (i = 0; i < ir->type->vector_elements; i++) {
2039 if (glsl_version >= 130)
2040 values[i].b = ir->value.b[i];
2041 else
2042 values[i].f = ir->value.b[i];
2043 }
2044 break;
2045 default:
2046 assert(!"Non-float/uint/int/bool constant");
2047 }
2048
2049 this->result = st_src_reg(PROGRAM_CONSTANT, -1, ir->type);
2050 this->result.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
2051 values, ir->type->vector_elements, gl_type,
2052 &this->result.swizzle);
2053 }
2054
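/**
 * Find or create the function_entry for the given signature.  On first use
 * this allocates temporary storage for every parameter and (if the function
 * is non-void) for the return value, so ir_call can simply move values in
 * and out of those temporaries.
 */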
2055 function_entry *
2056 glsl_to_tgsi_visitor::get_function_signature(ir_function_signature *sig)
2057 {
2058 function_entry *entry;
2059
2060 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
2061 entry = (function_entry *)iter.get();
2062
2063 if (entry->sig == sig)
2064 return entry;
2065 }
2066
2067 entry = ralloc(mem_ctx, function_entry);
2068 entry->sig = sig;
2069 entry->sig_id = this->next_signature_id++;
2070 entry->bgn_inst = NULL;
2071
2072 /* Allocate storage for all the parameters. */
2073 foreach_iter(exec_list_iterator, iter, sig->parameters) {
2074 ir_variable *param = (ir_variable *)iter.get();
2075 variable_storage *storage;
2076
2077 storage = find_variable_storage(param);
2078 assert(!storage);
2079
2080 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
2081 this->next_temp);
2082 this->variables.push_tail(storage);
2083
2084 this->next_temp += type_size(param->type);
2085 }
2086
2087 if (!sig->return_type->is_void()) {
2088 entry->return_reg = get_temp(sig->return_type);
2089 } else {
2090 entry->return_reg = undef_src;
2091 }
2092
2093 this->function_signatures.push_tail(entry);
2094 return entry;
2095 }
2096
2097 void
2098 glsl_to_tgsi_visitor::visit(ir_call *ir)
2099 {
2100 glsl_to_tgsi_instruction *call_inst;
2101 ir_function_signature *sig = ir->get_callee();
2102 function_entry *entry = get_function_signature(sig);
2103 int i;
2104
2105 /* Process in parameters. */
2106 exec_list_iterator sig_iter = sig->parameters.iterator();
2107 foreach_iter(exec_list_iterator, iter, *ir) {
2108 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2109 ir_variable *param = (ir_variable *)sig_iter.get();
2110
2111 if (param->mode == ir_var_in ||
2112 param->mode == ir_var_inout) {
2113 variable_storage *storage = find_variable_storage(param);
2114 assert(storage);
2115
2116 param_rval->accept(this);
2117 st_src_reg r = this->result;
2118
2119 st_dst_reg l;
2120 l.file = storage->file;
2121 l.index = storage->index;
2122 l.reladdr = NULL;
2123 l.writemask = WRITEMASK_XYZW;
2124 l.cond_mask = COND_TR;
2125
2126 for (i = 0; i < type_size(param->type); i++) {
2127 emit(ir, TGSI_OPCODE_MOV, l, r);
2128 l.index++;
2129 r.index++;
2130 }
2131 }
2132
2133 sig_iter.next();
2134 }
2135 assert(!sig_iter.has_next());
2136
2137 /* Emit call instruction */
2138 call_inst = emit(ir, TGSI_OPCODE_CAL);
2139 call_inst->function = entry;
2140
2141 /* Process out parameters. */
2142 sig_iter = sig->parameters.iterator();
2143 foreach_iter(exec_list_iterator, iter, *ir) {
2144 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2145 ir_variable *param = (ir_variable *)sig_iter.get();
2146
2147 if (param->mode == ir_var_out ||
2148 param->mode == ir_var_inout) {
2149 variable_storage *storage = find_variable_storage(param);
2150 assert(storage);
2151
2152 st_src_reg r;
2153 r.file = storage->file;
2154 r.index = storage->index;
2155 r.reladdr = NULL;
2156 r.swizzle = SWIZZLE_NOOP;
2157 r.negate = 0;
2158
2159 param_rval->accept(this);
2160 st_dst_reg l = st_dst_reg(this->result);
2161
2162 for (i = 0; i < type_size(param->type); i++) {
2163 emit(ir, TGSI_OPCODE_MOV, l, r);
2164 l.index++;
2165 r.index++;
2166 }
2167 }
2168
2169 sig_iter.next();
2170 }
2171 assert(!sig_iter.has_next());
2172
2173 /* Process return value. */
2174 this->result = entry->return_reg;
2175 }
2176
2177 void
2178 glsl_to_tgsi_visitor::visit(ir_texture *ir)
2179 {
2180 st_src_reg result_src, coord, lod_info, projector, dx, dy;
2181 st_dst_reg result_dst, coord_dst;
2182 glsl_to_tgsi_instruction *inst = NULL;
2183 unsigned opcode = TGSI_OPCODE_NOP;
2184
2185 ir->coordinate->accept(this);
2186
2187 /* Put our coords in a temp. We'll need to modify them for shadow,
2188    * projection, or LOD, so the only case we'd use it as-is is if
2189 * we're doing plain old texturing. Mesa IR optimization should
2190 * handle cleaning up our mess in that case.
2191 */
2192 coord = get_temp(glsl_type::vec4_type);
2193 coord_dst = st_dst_reg(coord);
2194 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2195
2196 if (ir->projector) {
2197 ir->projector->accept(this);
2198 projector = this->result;
2199 }
2200
2201 /* Storage for our result. Ideally for an assignment we'd be using
2202 * the actual storage for the result here, instead.
2203 */
2204 result_src = get_temp(glsl_type::vec4_type);
2205 result_dst = st_dst_reg(result_src);
2206
2207 switch (ir->op) {
2208 case ir_tex:
2209 opcode = TGSI_OPCODE_TEX;
2210 break;
2211 case ir_txb:
2212 opcode = TGSI_OPCODE_TXB;
2213 ir->lod_info.bias->accept(this);
2214 lod_info = this->result;
2215 break;
2216 case ir_txl:
2217 opcode = TGSI_OPCODE_TXL;
2218 ir->lod_info.lod->accept(this);
2219 lod_info = this->result;
2220 break;
2221 case ir_txd:
2222 opcode = TGSI_OPCODE_TXD;
2223 ir->lod_info.grad.dPdx->accept(this);
2224 dx = this->result;
2225 ir->lod_info.grad.dPdy->accept(this);
2226 dy = this->result;
2227 break;
2228 case ir_txf: /* TODO: use TGSI_OPCODE_TXF here */
2229 assert(!"GLSL 1.30 features unsupported");
2230 break;
2231 }
2232
2233 if (ir->projector) {
2234 if (opcode == TGSI_OPCODE_TEX) {
2235 /* Slot the projector in as the last component of the coord. */
2236 coord_dst.writemask = WRITEMASK_W;
2237 emit(ir, TGSI_OPCODE_MOV, coord_dst, projector);
2238 coord_dst.writemask = WRITEMASK_XYZW;
2239 opcode = TGSI_OPCODE_TXP;
2240 } else {
2241 st_src_reg coord_w = coord;
2242 coord_w.swizzle = SWIZZLE_WWWW;
2243
2244 /* For the other TEX opcodes there's no projective version
2245 * since the last slot is taken up by LOD info. Do the
2246 * projective divide now.
2247 */
2248 coord_dst.writemask = WRITEMASK_W;
2249 emit(ir, TGSI_OPCODE_RCP, coord_dst, projector);
2250
2251 /* In the case where we have to project the coordinates "by hand,"
2252 * the shadow comparator value must also be projected.
2253 */
2254 st_src_reg tmp_src = coord;
2255 if (ir->shadow_comparitor) {
2256 /* Slot the shadow value in as the second to last component of the
2257 * coord.
2258 */
2259 ir->shadow_comparitor->accept(this);
2260
2261 tmp_src = get_temp(glsl_type::vec4_type);
2262 st_dst_reg tmp_dst = st_dst_reg(tmp_src);
2263
2264 tmp_dst.writemask = WRITEMASK_Z;
2265 emit(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);
2266
2267 tmp_dst.writemask = WRITEMASK_XY;
2268 emit(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
2269 }
2270
2271 coord_dst.writemask = WRITEMASK_XYZ;
2272 emit(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);
2273
2274 coord_dst.writemask = WRITEMASK_XYZW;
2275 coord.swizzle = SWIZZLE_XYZW;
2276 }
2277 }
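   /* A sketch of what the by-hand projection above emits for e.g. TXB
    * (register names are illustrative):
    *
    *   RCP coord.w, projector;          (compute 1/q)
    *   MOV tmp.z, shadow_ref;           (only when a shadow comparison is used)
    *   MOV tmp.xy, coord;
    *   MUL coord.xyz, tmp, coord.wwww;  (divide x/y/z by q)
    */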
2278
2279 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the shadow
2280    * comparator was put in the correct place (and projected) by the code
2281    * above that handles by-hand projection.
2282 */
2283 if (ir->shadow_comparitor && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
2284 /* Slot the shadow value in as the second to last component of the
2285 * coord.
2286 */
2287 ir->shadow_comparitor->accept(this);
2288 coord_dst.writemask = WRITEMASK_Z;
2289 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2290 coord_dst.writemask = WRITEMASK_XYZW;
2291 }
2292
2293 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB) {
2294 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
2295 coord_dst.writemask = WRITEMASK_W;
2296 emit(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
2297 coord_dst.writemask = WRITEMASK_XYZW;
2298 }
2299
2300 if (opcode == TGSI_OPCODE_TXD)
2301 inst = emit(ir, opcode, result_dst, coord, dx, dy);
2302 else
2303 inst = emit(ir, opcode, result_dst, coord);
2304
2305 if (ir->shadow_comparitor)
2306 inst->tex_shadow = GL_TRUE;
2307
2308 inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler,
2309 this->shader_program,
2310 this->prog);
2311
2312 const glsl_type *sampler_type = ir->sampler->type;
2313
2314 switch (sampler_type->sampler_dimensionality) {
2315 case GLSL_SAMPLER_DIM_1D:
2316 inst->tex_target = (sampler_type->sampler_array)
2317 ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
2318 break;
2319 case GLSL_SAMPLER_DIM_2D:
2320 inst->tex_target = (sampler_type->sampler_array)
2321 ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
2322 break;
2323 case GLSL_SAMPLER_DIM_3D:
2324 inst->tex_target = TEXTURE_3D_INDEX;
2325 break;
2326 case GLSL_SAMPLER_DIM_CUBE:
2327 inst->tex_target = TEXTURE_CUBE_INDEX;
2328 break;
2329 case GLSL_SAMPLER_DIM_RECT:
2330 inst->tex_target = TEXTURE_RECT_INDEX;
2331 break;
2332 case GLSL_SAMPLER_DIM_BUF:
2333 assert(!"FINISHME: Implement ARB_texture_buffer_object");
2334 break;
2335 default:
2336 assert(!"Should not get here.");
2337 }
2338
2339 this->result = result_src;
2340 }
2341
2342 void
2343 glsl_to_tgsi_visitor::visit(ir_return *ir)
2344 {
2345 if (ir->get_value()) {
2346 st_dst_reg l;
2347 int i;
2348
2349 assert(current_function);
2350
2351 ir->get_value()->accept(this);
2352 st_src_reg r = this->result;
2353
2354 l = st_dst_reg(current_function->return_reg);
2355
2356 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
2357 emit(ir, TGSI_OPCODE_MOV, l, r);
2358 l.index++;
2359 r.index++;
2360 }
2361 }
2362
2363 emit(ir, TGSI_OPCODE_RET);
2364 }
2365
2366 void
2367 glsl_to_tgsi_visitor::visit(ir_discard *ir)
2368 {
2369 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
2370
2371 if (ir->condition) {
2372 ir->condition->accept(this);
2373 this->result.negate = ~this->result.negate;
2374 emit(ir, TGSI_OPCODE_KIL, undef_dst, this->result);
2375 } else {
2376 emit(ir, TGSI_OPCODE_KILP);
2377 }
2378
2379 fp->UsesKill = GL_TRUE;
2380 }
2381
2382 void
2383 glsl_to_tgsi_visitor::visit(ir_if *ir)
2384 {
2385 glsl_to_tgsi_instruction *cond_inst, *if_inst, *else_inst = NULL;
2386 glsl_to_tgsi_instruction *prev_inst;
2387
2388 prev_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2389
2390 ir->condition->accept(this);
2391 assert(this->result.file != PROGRAM_UNDEFINED);
2392
2393 if (this->options->EmitCondCodes) {
2394 cond_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2395
2396 /* See if we actually generated any instruction for generating
2397 * the condition. If not, then cook up a move to a temp so we
2398 * have something to set cond_update on.
2399 */
2400 if (cond_inst == prev_inst) {
2401 st_src_reg temp = get_temp(glsl_type::bool_type);
2402 cond_inst = emit(ir->condition, TGSI_OPCODE_MOV, st_dst_reg(temp), result);
2403 }
2404 cond_inst->cond_update = GL_TRUE;
2405
2406 if_inst = emit(ir->condition, TGSI_OPCODE_IF);
2407 if_inst->dst.cond_mask = COND_NE;
2408 } else {
2409 if_inst = emit(ir->condition, TGSI_OPCODE_IF, undef_dst, this->result);
2410 }
2411
2412 this->instructions.push_tail(if_inst);
2413
2414 visit_exec_list(&ir->then_instructions, this);
2415
2416 if (!ir->else_instructions.is_empty()) {
2417 else_inst = emit(ir->condition, TGSI_OPCODE_ELSE);
2418 visit_exec_list(&ir->else_instructions, this);
2419 }
2420
2421 if_inst = emit(ir->condition, TGSI_OPCODE_ENDIF);
2422 }
2423
2424 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
2425 {
2426 result.file = PROGRAM_UNDEFINED;
2427 next_temp = 1;
2428 next_signature_id = 1;
2429 current_function = NULL;
2430 num_address_regs = 0;
2431 indirect_addr_temps = false;
2432 indirect_addr_consts = false;
2433 mem_ctx = ralloc_context(NULL);
2434 }
2435
2436 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
2437 {
2438 ralloc_free(mem_ctx);
2439 }
2440
2441 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v)
2442 {
2443 delete v;
2444 }
2445
2446
2447 /**
2448 * Count resources used by the given gpu program (number of texture
2449 * samplers, etc).
2450 */
2451 static void
2452 count_resources(glsl_to_tgsi_visitor *v, gl_program *prog)
2453 {
2454 v->samplers_used = 0;
2455
2456 foreach_iter(exec_list_iterator, iter, v->instructions) {
2457 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2458
2459 if (is_tex_instruction(inst->op)) {
2460 v->samplers_used |= 1 << inst->sampler;
2461
2462 prog->SamplerTargets[inst->sampler] =
2463 (gl_texture_index)inst->tex_target;
2464 if (inst->tex_shadow) {
2465 prog->ShadowSamplers |= 1 << inst->sampler;
2466 }
2467 }
2468 }
2469
2470 prog->SamplersUsed = v->samplers_used;
2471 _mesa_update_shader_textures_used(prog);
2472 }
2473
2474
2475 /**
2476 * Check if the given vertex/fragment/shader program is within the
2477 * resource limits of the context (number of texture units, etc).
2478 * If any of those checks fail, record a linker error.
2479 *
2480 * XXX more checks are needed...
2481 */
2482 static void
2483 check_resources(const struct gl_context *ctx,
2484 struct gl_shader_program *shader_program,
2485 glsl_to_tgsi_visitor *prog,
2486 struct gl_program *proginfo)
2487 {
2488 switch (proginfo->Target) {
2489 case GL_VERTEX_PROGRAM_ARB:
2490 if (_mesa_bitcount(prog->samplers_used) >
2491 ctx->Const.MaxVertexTextureImageUnits) {
2492 fail_link(shader_program, "Too many vertex shader texture samplers");
2493 }
2494 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2495 fail_link(shader_program, "Too many vertex shader constants");
2496 }
2497 break;
2498 case MESA_GEOMETRY_PROGRAM:
2499 if (_mesa_bitcount(prog->samplers_used) >
2500 ctx->Const.MaxGeometryTextureImageUnits) {
2501 fail_link(shader_program, "Too many geometry shader texture samplers");
2502 }
2503 if (proginfo->Parameters->NumParameters >
2504 MAX_GEOMETRY_UNIFORM_COMPONENTS / 4) {
2505 fail_link(shader_program, "Too many geometry shader constants");
2506 }
2507 break;
2508 case GL_FRAGMENT_PROGRAM_ARB:
2509 if (_mesa_bitcount(prog->samplers_used) >
2510 ctx->Const.MaxTextureImageUnits) {
2511 fail_link(shader_program, "Too many fragment shader texture samplers");
2512 }
2513 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2514 fail_link(shader_program, "Too many fragment shader constants");
2515 }
2516 break;
2517 default:
2518 _mesa_problem(ctx, "unexpected program type in check_resources()");
2519 }
2520 }
2521
2522
2523
2524 struct uniform_sort {
2525 struct gl_uniform *u;
2526 int pos;
2527 };
2528
2529 /* The shader_program->Uniforms list is almost sorted in increasing
2530 * uniform->{Frag,Vert}Pos locations, but not quite when there are
2531 * uniforms shared between targets. We need to add parameters in
2532 * increasing order for the targets.
2533 */
2534 static int
2535 sort_uniforms(const void *a, const void *b)
2536 {
2537 struct uniform_sort *u1 = (struct uniform_sort *)a;
2538 struct uniform_sort *u2 = (struct uniform_sort *)b;
2539
2540 return u1->pos - u2->pos;
2541 }
2542
2543 /* Add the uniforms to the parameters. The linker chose locations
2544 * in our parameters lists (which weren't created yet), which the
2545 * uniforms code will use to poke values into our parameters list
2546 * when uniforms are updated.
2547 */
2548 static void
2549 add_uniforms_to_parameters_list(struct gl_shader_program *shader_program,
2550 struct gl_shader *shader,
2551 struct gl_program *prog)
2552 {
2553 unsigned int i;
2554 unsigned int next_sampler = 0, num_uniforms = 0;
2555 struct uniform_sort *sorted_uniforms;
2556
2557 sorted_uniforms = ralloc_array(NULL, struct uniform_sort,
2558 shader_program->Uniforms->NumUniforms);
2559
2560 for (i = 0; i < shader_program->Uniforms->NumUniforms; i++) {
2561 struct gl_uniform *uniform = shader_program->Uniforms->Uniforms + i;
2562 int parameter_index = -1;
2563
2564 switch (shader->Type) {
2565 case GL_VERTEX_SHADER:
2566 parameter_index = uniform->VertPos;
2567 break;
2568 case GL_FRAGMENT_SHADER:
2569 parameter_index = uniform->FragPos;
2570 break;
2571 case GL_GEOMETRY_SHADER:
2572 parameter_index = uniform->GeomPos;
2573 break;
2574 }
2575
2576 /* Only add uniforms used in our target. */
2577 if (parameter_index != -1) {
2578 sorted_uniforms[num_uniforms].pos = parameter_index;
2579 sorted_uniforms[num_uniforms].u = uniform;
2580 num_uniforms++;
2581 }
2582 }
2583
2584 qsort(sorted_uniforms, num_uniforms, sizeof(struct uniform_sort),
2585 sort_uniforms);
2586
2587 for (i = 0; i < num_uniforms; i++) {
2588 struct gl_uniform *uniform = sorted_uniforms[i].u;
2589 int parameter_index = sorted_uniforms[i].pos;
2590 const glsl_type *type = uniform->Type;
2591 unsigned int size;
2592
2593 if (type->is_vector() ||
2594 type->is_scalar()) {
2595 size = type->vector_elements;
2596 } else {
2597 size = type_size(type) * 4;
2598 }
2599
2600 gl_register_file file;
2601 if (type->is_sampler() ||
2602 (type->is_array() && type->fields.array->is_sampler())) {
2603 file = PROGRAM_SAMPLER;
2604 } else {
2605 file = PROGRAM_UNIFORM;
2606 }
2607
2608 GLint index = _mesa_lookup_parameter_index(prog->Parameters, -1,
2609 uniform->Name);
2610
2611 if (index < 0) {
2612 index = _mesa_add_parameter(prog->Parameters, file,
2613 uniform->Name, size, type->gl_type,
2614 NULL, NULL, 0x0);
2615
2616 /* Sampler uniform values are stored in prog->SamplerUnits,
2617 * and the entry in that array is selected by this index we
2618 * store in ParameterValues[].
2619 */
2620 if (file == PROGRAM_SAMPLER) {
2621 for (unsigned int j = 0; j < size / 4; j++)
2622 prog->Parameters->ParameterValues[index + j][0].f = next_sampler++;
2623 }
2624
2625 /* The location chosen in the Parameters list here (returned
2626          * from _mesa_add_parameter) has to match what the linker chose.
2627 */
2628 if (index != parameter_index) {
2629 fail_link(shader_program, "Allocation of uniform `%s' to target "
2630 "failed (%d vs %d)\n",
2631 uniform->Name, index, parameter_index);
2632 }
2633 }
2634 }
2635
2636 ralloc_free(sorted_uniforms);
2637 }
2638
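/**
 * Recursively push the compile-time initializer 'val' into the uniform named
 * 'name': record types are split into per-field names, arrays are walked
 * element by element, bools are converted to ints, and the values are stored
 * through _mesa_uniform() / _mesa_uniform_matrix().
 */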
2639 static void
2640 set_uniform_initializer(struct gl_context *ctx, void *mem_ctx,
2641 struct gl_shader_program *shader_program,
2642 const char *name, const glsl_type *type,
2643 ir_constant *val)
2644 {
2645 if (type->is_record()) {
2646 ir_constant *field_constant;
2647
2648 field_constant = (ir_constant *)val->components.get_head();
2649
2650 for (unsigned int i = 0; i < type->length; i++) {
2651 const glsl_type *field_type = type->fields.structure[i].type;
2652 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
2653 type->fields.structure[i].name);
2654 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name,
2655 field_type, field_constant);
2656 field_constant = (ir_constant *)field_constant->next;
2657 }
2658 return;
2659 }
2660
2661 int loc = _mesa_get_uniform_location(ctx, shader_program, name);
2662
2663 if (loc == -1) {
2664 fail_link(shader_program,
2665 "Couldn't find uniform for initializer %s\n", name);
2666 return;
2667 }
2668
2669 for (unsigned int i = 0; i < (type->is_array() ? type->length : 1); i++) {
2670 ir_constant *element;
2671 const glsl_type *element_type;
2672 if (type->is_array()) {
2673 element = val->array_elements[i];
2674 element_type = type->fields.array;
2675 } else {
2676 element = val;
2677 element_type = type;
2678 }
2679
2680 void *values;
2681
2682 if (element_type->base_type == GLSL_TYPE_BOOL) {
2683 int *conv = ralloc_array(mem_ctx, int, element_type->components());
2684 for (unsigned int j = 0; j < element_type->components(); j++) {
2685 conv[j] = element->value.b[j];
2686 }
2687 values = (void *)conv;
2688 element_type = glsl_type::get_instance(GLSL_TYPE_INT,
2689 element_type->vector_elements,
2690 1);
2691 } else {
2692 values = &element->value;
2693 }
2694
2695 if (element_type->is_matrix()) {
2696 _mesa_uniform_matrix(ctx, shader_program,
2697 element_type->matrix_columns,
2698 element_type->vector_elements,
2699 loc, 1, GL_FALSE, (GLfloat *)values);
2700 loc += element_type->matrix_columns;
2701 } else {
2702 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns,
2703 values, element_type->gl_type);
2704 loc += type_size(element_type);
2705 }
2706 }
2707 }
2708
2709 static void
2710 set_uniform_initializers(struct gl_context *ctx,
2711 struct gl_shader_program *shader_program)
2712 {
2713 void *mem_ctx = NULL;
2714
2715 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2716 struct gl_shader *shader = shader_program->_LinkedShaders[i];
2717
2718 if (shader == NULL)
2719 continue;
2720
2721 foreach_iter(exec_list_iterator, iter, *shader->ir) {
2722 ir_instruction *ir = (ir_instruction *)iter.get();
2723 ir_variable *var = ir->as_variable();
2724
2725 if (!var || var->mode != ir_var_uniform || !var->constant_value)
2726 continue;
2727
2728 if (!mem_ctx)
2729 mem_ctx = ralloc_context(NULL);
2730
2731 set_uniform_initializer(ctx, mem_ctx, shader_program, var->name,
2732 var->type, var->constant_value);
2733 }
2734 }
2735
2736 ralloc_free(mem_ctx);
2737 }
2738
2739 /*
2740 * Scan/rewrite program to remove reads of custom (output) registers.
2741 * The passed type has to be either PROGRAM_OUTPUT or PROGRAM_VARYING
2742 * (for vertex shaders).
2743 * In GLSL shaders, varying vars can be read and written.
2744 * On some hardware, trying to read an output register causes trouble.
2745 * So, rewrite the program to use a temporary register in this case.
2746 *
2747 * Based on _mesa_remove_output_reads from programopt.c.
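 *
 * Illustrative sketch (register numbers invented for the example):
 *
 *   MOV OUTPUT[1], TEMP[0];
 *   ADD TEMP[2], OUTPUT[1], TEMP[3];
 *
 * becomes
 *
 *   MOV TEMP[4], TEMP[0];
 *   ADD TEMP[2], TEMP[4], TEMP[3];
 *   ...
 *   MOV OUTPUT[1], TEMP[4];    (appended at the end of the program)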
2748 */
2749 void
2750 glsl_to_tgsi_visitor::remove_output_reads(gl_register_file type)
2751 {
2752 GLuint i;
2753 GLint outputMap[VERT_RESULT_MAX];
2754 GLint outputTypes[VERT_RESULT_MAX];
2755 GLuint numVaryingReads = 0;
2756 GLboolean usedTemps[MAX_TEMPS];
2757 GLuint firstTemp = 0;
2758
2759 _mesa_find_used_registers(prog, PROGRAM_TEMPORARY,
2760 usedTemps, MAX_TEMPS);
2761
2762 assert(type == PROGRAM_VARYING || type == PROGRAM_OUTPUT);
2763 assert(prog->Target == GL_VERTEX_PROGRAM_ARB || type != PROGRAM_VARYING);
2764
2765 for (i = 0; i < VERT_RESULT_MAX; i++)
2766 outputMap[i] = -1;
2767
2768 /* look for instructions which read from varying vars */
2769 foreach_iter(exec_list_iterator, iter, this->instructions) {
2770 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2771 const GLuint numSrc = num_inst_src_regs(inst->op);
2772 GLuint j;
2773 for (j = 0; j < numSrc; j++) {
2774 if (inst->src[j].file == type) {
2775 /* replace the read with a temp reg */
2776 const GLuint var = inst->src[j].index;
2777 if (outputMap[var] == -1) {
2778 numVaryingReads++;
2779 outputMap[var] = _mesa_find_free_register(usedTemps,
2780 MAX_TEMPS,
2781 firstTemp);
2782 outputTypes[var] = inst->src[j].type;
2783 firstTemp = outputMap[var] + 1;
2784 }
2785 inst->src[j].file = PROGRAM_TEMPORARY;
2786 inst->src[j].index = outputMap[var];
2787 }
2788 }
2789 }
2790
2791 if (numVaryingReads == 0)
2792 return; /* nothing to be done */
2793
2794 /* look for instructions which write to the varying vars identified above */
2795 foreach_iter(exec_list_iterator, iter, this->instructions) {
2796 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2797 if (inst->dst.file == type && outputMap[inst->dst.index] >= 0) {
2798 /* change inst to write to the temp reg, instead of the varying */
2799 inst->dst.file = PROGRAM_TEMPORARY;
2800 inst->dst.index = outputMap[inst->dst.index];
2801 }
2802 }
2803
2804 /* insert new MOV instructions at the end */
2805 for (i = 0; i < VERT_RESULT_MAX; i++) {
2806 if (outputMap[i] >= 0) {
2807 /* MOV VAR[i], TEMP[tmp]; */
2808 st_src_reg src = st_src_reg(PROGRAM_TEMPORARY, outputMap[i], outputTypes[i]);
2809 st_dst_reg dst = st_dst_reg(type, WRITEMASK_XYZW, outputTypes[i]);
2810 dst.index = i;
2811 this->emit(NULL, TGSI_OPCODE_MOV, dst, src);
2812 }
2813 }
2814 }
2815
2816 /**
2817 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
2818 * are read from the given src in this instruction
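 *
 * For example (illustrative values): with dst.writemask = XY and
 * src.swizzle = .wzyx, components W and Z of src are read, so the
 * returned mask is WRITEMASK_ZW.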
2819 */
2820 static int
2821 get_src_arg_mask(st_dst_reg dst, st_src_reg src)
2822 {
2823 int read_mask = 0, comp;
2824
2825 /* Now, given the src swizzle and the written channels, find which
2826 * components are actually read
2827 */
2828 for (comp = 0; comp < 4; ++comp) {
2829 const unsigned coord = GET_SWZ(src.swizzle, comp);
2830 ASSERT(coord < 4);
2831 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
2832 read_mask |= 1 << coord;
2833 }
2834
2835 return read_mask;
2836 }
2837
2838 /**
2839 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
2840 * instruction is the first instruction to write to register T0. There are
2841 * several lowering passes done in GLSL IR (e.g. branches and
2842 * relative addressing) that create a large number of conditional assignments
2843  * that glsl_to_tgsi converts to CMP instructions like the one mentioned above.
2844 *
2845 * Here is why this conversion is safe:
2846 * CMP T0, T1 T2 T0 can be expanded to:
2847 * if (T1 < 0.0)
2848 * MOV T0, T2;
2849 * else
2850 * MOV T0, T0;
2851 *
2852 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
2853 * as the original program. If (T1 < 0.0) evaluates to false, executing
2854 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
2855 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
2856 * because any instruction that was going to read from T0 after this was going
2857 * to read a garbage value anyway.
2858 */
2859 void
2860 glsl_to_tgsi_visitor::simplify_cmp(void)
2861 {
2862 unsigned tempWrites[MAX_TEMPS];
2863 unsigned outputWrites[MAX_PROGRAM_OUTPUTS];
2864
2865 memset(tempWrites, 0, sizeof(tempWrites));
2866 memset(outputWrites, 0, sizeof(outputWrites));
2867
2868 foreach_iter(exec_list_iterator, iter, this->instructions) {
2869 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2870 unsigned prevWriteMask = 0;
2871
2872 /* Give up if we encounter relative addressing or flow control. */
2873 if (inst->dst.reladdr ||
2874 tgsi_get_opcode_info(inst->op)->is_branch ||
2875 inst->op == TGSI_OPCODE_BGNSUB ||
2876 inst->op == TGSI_OPCODE_CONT ||
2877 inst->op == TGSI_OPCODE_END ||
2878 inst->op == TGSI_OPCODE_ENDSUB ||
2879 inst->op == TGSI_OPCODE_RET) {
2880 return;
2881 }
2882
2883 if (inst->dst.file == PROGRAM_OUTPUT) {
2884 assert(inst->dst.index < MAX_PROGRAM_OUTPUTS);
2885 prevWriteMask = outputWrites[inst->dst.index];
2886 outputWrites[inst->dst.index] |= inst->dst.writemask;
2887 } else if (inst->dst.file == PROGRAM_TEMPORARY) {
2888 assert(inst->dst.index < MAX_TEMPS);
2889 prevWriteMask = tempWrites[inst->dst.index];
2890 tempWrites[inst->dst.index] |= inst->dst.writemask;
2891 }
2892
2893 /* For a CMP to be considered a conditional write, the destination
2894 * register and source register two must be the same. */
2895 if (inst->op == TGSI_OPCODE_CMP
2896 && !(inst->dst.writemask & prevWriteMask)
2897 && inst->src[2].file == inst->dst.file
2898 && inst->src[2].index == inst->dst.index
2899 && inst->dst.writemask == get_src_arg_mask(inst->dst, inst->src[2])) {
2900
2901 inst->op = TGSI_OPCODE_MOV;
2902 inst->src[0] = inst->src[1];
2903 }
2904 }
2905 }
2906
2907 /* Replaces all references to a temporary register index with another index. */
2908 void
2909 glsl_to_tgsi_visitor::rename_temp_register(int index, int new_index)
2910 {
2911 foreach_iter(exec_list_iterator, iter, this->instructions) {
2912 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2913 unsigned j;
2914
2915 for (j=0; j < num_inst_src_regs(inst->op); j++) {
2916 if (inst->src[j].file == PROGRAM_TEMPORARY &&
2917 inst->src[j].index == index) {
2918 inst->src[j].index = new_index;
2919 }
2920 }
2921
2922 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
2923 inst->dst.index = new_index;
2924 }
2925 }
2926 }
2927
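/**
 * Return the index of the first instruction that reads the given temporary,
 * or the index of the outermost enclosing BGNLOOP if that read happens inside
 * a loop, or -1 if the temporary is never read.  Indices here are positions
 * in this->instructions, not TGSI instruction numbers.
 */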
2928 int
2929 glsl_to_tgsi_visitor::get_first_temp_read(int index)
2930 {
2931 int depth = 0; /* loop depth */
2932 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
2933 unsigned i = 0, j;
2934
2935 foreach_iter(exec_list_iterator, iter, this->instructions) {
2936 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2937
2938 for (j=0; j < num_inst_src_regs(inst->op); j++) {
2939 if (inst->src[j].file == PROGRAM_TEMPORARY &&
2940 inst->src[j].index == index) {
2941 return (depth == 0) ? i : loop_start;
2942 }
2943 }
2944
2945 if (inst->op == TGSI_OPCODE_BGNLOOP) {
2946 if(depth++ == 0)
2947 loop_start = i;
2948 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
2949 if (--depth == 0)
2950 loop_start = -1;
2951 }
2952 assert(depth >= 0);
2953
2954 i++;
2955 }
2956
2957 return -1;
2958 }
2959
2960 int
2961 glsl_to_tgsi_visitor::get_first_temp_write(int index)
2962 {
2963 int depth = 0; /* loop depth */
2964 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
2965 int i = 0;
2966
2967 foreach_iter(exec_list_iterator, iter, this->instructions) {
2968 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2969
2970 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
2971 return (depth == 0) ? i : loop_start;
2972 }
2973
2974 if (inst->op == TGSI_OPCODE_BGNLOOP) {
2975 if(depth++ == 0)
2976 loop_start = i;
2977 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
2978 if (--depth == 0)
2979 loop_start = -1;
2980 }
2981 assert(depth >= 0);
2982
2983 i++;
2984 }
2985
2986 return -1;
2987 }
2988
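/**
 * Return the index of the last instruction that reads the given temporary,
 * or -1 if it is never read.  If the last read happens inside a loop, the
 * index of the matching outermost ENDLOOP is returned instead, since the
 * value stays live for the whole loop.
 */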
2989 int
2990 glsl_to_tgsi_visitor::get_last_temp_read(int index)
2991 {
2992 int depth = 0; /* loop depth */
2993 int last = -1; /* index of last instruction that reads the temporary */
2994 unsigned i = 0, j;
2995
2996 foreach_iter(exec_list_iterator, iter, this->instructions) {
2997 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2998
2999 for (j=0; j < num_inst_src_regs(inst->op); j++) {
3000 if (inst->src[j].file == PROGRAM_TEMPORARY &&
3001 inst->src[j].index == index) {
3002 last = (depth == 0) ? i : -2;
3003 }
3004 }
3005
3006 if (inst->op == TGSI_OPCODE_BGNLOOP)
3007 depth++;
3008 else if (inst->op == TGSI_OPCODE_ENDLOOP)
3009 if (--depth == 0 && last == -2)
3010 last = i;
3011 assert(depth >= 0);
3012
3013 i++;
3014 }
3015
3016 assert(last >= -1);
3017 return last;
3018 }
3019
3020 int
3021 glsl_to_tgsi_visitor::get_last_temp_write(int index)
3022 {
3023 int depth = 0; /* loop depth */
3024 int last = -1; /* index of last instruction that writes to the temporary */
3025 int i = 0;
3026
3027 foreach_iter(exec_list_iterator, iter, this->instructions) {
3028 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3029
3030 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index)
3031 last = (depth == 0) ? i : -2;
3032
3033 if (inst->op == TGSI_OPCODE_BGNLOOP)
3034 depth++;
3035 else if (inst->op == TGSI_OPCODE_ENDLOOP)
3036 if (--depth == 0 && last == -2)
3037 last = i;
3038 assert(depth >= 0);
3039
3040 i++;
3041 }
3042
3043 assert(last >= -1);
3044 return last;
3045 }
3046
3047 /*
3048 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
3049 * channels for copy propagation and updates following instructions to
3050 * use the original versions.
3051 *
3052 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3053 * will occur. As an example, a TXP production before this pass:
3054 *
3055 * 0: MOV TEMP[1], INPUT[4].xyyy;
3056 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3057 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
3058 *
3059 * and after:
3060 *
3061 * 0: MOV TEMP[1], INPUT[4].xyyy;
3062 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3063 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3064 *
3065 * which allows for dead code elimination on TEMP[1]'s writes.
3066 */
3067 void
3068 glsl_to_tgsi_visitor::copy_propagate(void)
3069 {
3070 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx,
3071 glsl_to_tgsi_instruction *,
3072 this->next_temp * 4);
3073 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
3074 int level = 0;
3075
3076 foreach_iter(exec_list_iterator, iter, this->instructions) {
3077 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3078
3079 assert(inst->dst.file != PROGRAM_TEMPORARY
3080 || inst->dst.index < this->next_temp);
3081
3082 /* First, do any copy propagation possible into the src regs. */
3083 for (int r = 0; r < 3; r++) {
3084 glsl_to_tgsi_instruction *first = NULL;
3085 bool good = true;
3086 int acp_base = inst->src[r].index * 4;
3087
3088 if (inst->src[r].file != PROGRAM_TEMPORARY ||
3089 inst->src[r].reladdr)
3090 continue;
3091
3092 /* See if we can find entries in the ACP consisting of MOVs
3093 * from the same src register for all the swizzled channels
3094 * of this src register reference.
3095 */
3096 for (int i = 0; i < 4; i++) {
3097 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3098 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan];
3099
3100 if (!copy_chan) {
3101 good = false;
3102 break;
3103 }
3104
3105 assert(acp_level[acp_base + src_chan] <= level);
3106
3107 if (!first) {
3108 first = copy_chan;
3109 } else {
3110 if (first->src[0].file != copy_chan->src[0].file ||
3111 first->src[0].index != copy_chan->src[0].index) {
3112 good = false;
3113 break;
3114 }
3115 }
3116 }
3117
3118 if (good) {
3119 /* We've now validated that we can copy-propagate to
3120 * replace this src register reference. Do it.
3121 */
3122 inst->src[r].file = first->src[0].file;
3123 inst->src[r].index = first->src[0].index;
3124
3125 int swizzle = 0;
3126 for (int i = 0; i < 4; i++) {
3127 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3128 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan];
3129 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) <<
3130 (3 * i));
3131 }
3132 inst->src[r].swizzle = swizzle;
3133 }
3134 }
3135
3136 switch (inst->op) {
3137 case TGSI_OPCODE_BGNLOOP:
3138 case TGSI_OPCODE_ENDLOOP:
3139 /* End of a basic block, clear the ACP entirely. */
3140 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3141 break;
3142
3143 case TGSI_OPCODE_IF:
3144 ++level;
3145 break;
3146
3147 case TGSI_OPCODE_ENDIF:
3148 case TGSI_OPCODE_ELSE:
3149 /* Clear all channels written inside the block from the ACP, but
3150          * leave those that were not touched.
3151 */
3152 for (int r = 0; r < this->next_temp; r++) {
3153 for (int c = 0; c < 4; c++) {
3154 if (!acp[4 * r + c])
3155 continue;
3156
3157 if (acp_level[4 * r + c] >= level)
3158 acp[4 * r + c] = NULL;
3159 }
3160 }
3161 if (inst->op == TGSI_OPCODE_ENDIF)
3162 --level;
3163 break;
3164
3165 default:
3166 /* Continuing the block, clear any written channels from
3167 * the ACP.
3168 */
3169 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) {
3170 /* Any temporary might be written, so no copy propagation
3171 * across this instruction.
3172 */
3173 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3174 } else if (inst->dst.file == PROGRAM_OUTPUT &&
3175 inst->dst.reladdr) {
3176 /* Any output might be written, so no copy propagation
3177 * from outputs across this instruction.
3178 */
3179 for (int r = 0; r < this->next_temp; r++) {
3180 for (int c = 0; c < 4; c++) {
3181 if (!acp[4 * r + c])
3182 continue;
3183
3184 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
3185 acp[4 * r + c] = NULL;
3186 }
3187 }
3188 } else if (inst->dst.file == PROGRAM_TEMPORARY ||
3189 inst->dst.file == PROGRAM_OUTPUT) {
3190 /* Clear where it's used as dst. */
3191 if (inst->dst.file == PROGRAM_TEMPORARY) {
3192 for (int c = 0; c < 4; c++) {
3193 if (inst->dst.writemask & (1 << c)) {
3194 acp[4 * inst->dst.index + c] = NULL;
3195 }
3196 }
3197 }
3198
3199 /* Clear where it's used as src. */
3200 for (int r = 0; r < this->next_temp; r++) {
3201 for (int c = 0; c < 4; c++) {
3202 if (!acp[4 * r + c])
3203 continue;
3204
3205 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
3206
3207 if (acp[4 * r + c]->src[0].file == inst->dst.file &&
3208 acp[4 * r + c]->src[0].index == inst->dst.index &&
3209 inst->dst.writemask & (1 << src_chan))
3210 {
3211 acp[4 * r + c] = NULL;
3212 }
3213 }
3214 }
3215 }
3216 break;
3217 }
3218
3219 /* If this is a copy, add it to the ACP. */
3220 if (inst->op == TGSI_OPCODE_MOV &&
3221 inst->dst.file == PROGRAM_TEMPORARY &&
3222 !inst->dst.reladdr &&
3223 !inst->saturate &&
3224 !inst->src[0].reladdr &&
3225 !inst->src[0].negate) {
3226 for (int i = 0; i < 4; i++) {
3227 if (inst->dst.writemask & (1 << i)) {
3228 acp[4 * inst->dst.index + i] = inst;
3229 acp_level[4 * inst->dst.index + i] = level;
3230 }
3231 }
3232 }
3233 }
3234
3235 ralloc_free(acp_level);
3236 ralloc_free(acp);
3237 }
3238
3239 /*
3240 * Tracks available PROGRAM_TEMPORARY registers for dead code elimination.
3241 *
3242 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3243 * will occur. As an example, a TXP production after copy propagation but
3244 * before this pass:
3245 *
3246 * 0: MOV TEMP[1], INPUT[4].xyyy;
3247 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3248 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3249 *
3250 * and after this pass:
3251 *
3252 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3253 *
3254 * FIXME: assumes that all functions are inlined (no support for BGNSUB/ENDSUB)
3255 * FIXME: doesn't eliminate all dead code inside of loops; it steps around them
3256 */
3257 void
3258 glsl_to_tgsi_visitor::eliminate_dead_code(void)
3259 {
3260 int i;
3261
3262 for (i=0; i < this->next_temp; i++) {
3263 int last_read = get_last_temp_read(i);
3264 int j = 0;
3265
3266 foreach_iter(exec_list_iterator, iter, this->instructions) {
3267 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3268
3269 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == i &&
3270 j > last_read)
3271 {
3272 iter.remove();
3273 delete inst;
3274 }
3275
3276 j++;
3277 }
3278 }
3279 }
3280
3281 /*
3282 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
3283 * code elimination. This is less primitive than eliminate_dead_code(), as it
3284 * is per-channel and can detect consecutive writes without a read between them
3285 * as dead code. However, there is some dead code that can be eliminated by
3286 * eliminate_dead_code() but not this function - for example, this function
3287 * cannot eliminate an instruction writing to a register that is never read and
3288 * is the only instruction writing to that register.
3289 *
3290 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3291 * will occur.
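 *
 * Illustrative example of a per-channel kill this pass can do:
 *
 *   0: MOV TEMP[0], INPUT[4];
 *   1: MOV TEMP[0].xy, INPUT[5];
 *   2: ADD TEMP[1], TEMP[0], INPUT[6];
 *
 * The .xy channels written by instruction 0 are overwritten by instruction 1
 * before ever being read, so instruction 0's writemask is reduced to .zw.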
3292 */
3293 int
3294 glsl_to_tgsi_visitor::eliminate_dead_code_advanced(void)
3295 {
3296 glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx,
3297 glsl_to_tgsi_instruction *,
3298 this->next_temp * 4);
3299 int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
3300 int level = 0;
3301 int removed = 0;
3302
3303 foreach_iter(exec_list_iterator, iter, this->instructions) {
3304 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3305
3306 assert(inst->dst.file != PROGRAM_TEMPORARY
3307 || inst->dst.index < this->next_temp);
3308
3309 switch (inst->op) {
3310 case TGSI_OPCODE_BGNLOOP:
3311 case TGSI_OPCODE_ENDLOOP:
3312 /* End of a basic block, clear the write array entirely.
3313 * FIXME: This keeps us from killing dead code when the writes are
3314 * on either side of a loop, even when the register isn't touched
3315 * inside the loop.
3316 */
3317 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
3318 break;
3319
3320 case TGSI_OPCODE_ENDIF:
3321 --level;
3322 break;
3323
3324 case TGSI_OPCODE_ELSE:
3325 /* Clear all channels written inside the preceding if block from the
3326 * write array, but leave those that were not touched.
3327 *
3328 * FIXME: This destroys opportunities to remove dead code inside of
3329 * IF blocks that are followed by an ELSE block.
3330 */
3331 for (int r = 0; r < this->next_temp; r++) {
3332 for (int c = 0; c < 4; c++) {
3333 if (!writes[4 * r + c])
3334 continue;
3335
3336 if (write_level[4 * r + c] >= level)
3337 writes[4 * r + c] = NULL;
3338 }
3339 }
3340 break;
3341
3342 case TGSI_OPCODE_IF:
3343 ++level;
3344 /* fallthrough to default case to mark the condition as read */
3345
3346 default:
3347 /* Continuing the block, clear any channels from the write array that
3348 * are read by this instruction.
3349 */
3350 for (int i = 0; i < 4; i++) {
3351 if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){
3352 /* Any temporary might be read, so no dead code elimination
3353 * across this instruction.
3354 */
3355 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
3356 } else if (inst->src[i].file == PROGRAM_TEMPORARY) {
3357 /* Clear where it's used as src. */
3358 int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0);
3359 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1);
3360 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2);
3361 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3);
3362
3363 for (int c = 0; c < 4; c++) {
3364 if (src_chans & (1 << c)) {
3365 writes[4 * inst->src[i].index + c] = NULL;
3366 }
3367 }
3368 }
3369 }
3370 break;
3371 }
3372
3373 /* If this instruction writes to a temporary, add it to the write array.
3374 * If there is already an instruction in the write array for one or more
3375 * of the channels, flag that channel write as dead.
3376 */
3377 if (inst->dst.file == PROGRAM_TEMPORARY &&
3378 !inst->dst.reladdr &&
3379 !inst->saturate) {
3380 for (int c = 0; c < 4; c++) {
3381 if (inst->dst.writemask & (1 << c)) {
3382 if (writes[4 * inst->dst.index + c]) {
3383 if (write_level[4 * inst->dst.index + c] < level)
3384 continue;
3385 else
3386 writes[4 * inst->dst.index + c]->dead_mask |= (1 << c);
3387 }
3388 writes[4 * inst->dst.index + c] = inst;
3389 write_level[4 * inst->dst.index + c] = level;
3390 }
3391 }
3392 }
3393 }
3394
3395 /* Anything still in the write array at this point is dead code. */
3396 for (int r = 0; r < this->next_temp; r++) {
3397 for (int c = 0; c < 4; c++) {
3398 glsl_to_tgsi_instruction *inst = writes[4 * r + c];
3399 if (inst)
3400 inst->dead_mask |= (1 << c);
3401 }
3402 }
3403
3404 /* Now actually remove the instructions that are completely dead and update
3405 * the writemask of other instructions with dead channels.
3406 */
3407 foreach_iter(exec_list_iterator, iter, this->instructions) {
3408 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3409
3410 if (!inst->dead_mask || !inst->dst.writemask)
3411 continue;
3412 else if (inst->dead_mask == inst->dst.writemask) {
3413 iter.remove();
3414 delete inst;
3415 removed++;
3416 } else
3417 inst->dst.writemask &= ~(inst->dead_mask);
3418 }
3419
3420 ralloc_free(write_level);
3421 ralloc_free(writes);
3422
3423 return removed;
3424 }
3425
3426 /* Merges temporary registers together where possible to reduce the number of
3427 * registers needed to run a program.
3428 *
3429 * Produces optimal code only after copy propagation and dead code elimination
3430 * have been run. */
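/* Illustrative example: if TEMP[2] is first written at instruction 3 and last
 * read at instruction 7, while TEMP[5] is first written at instruction 9, the
 * two live ranges do not overlap, so every reference to TEMP[5] is renamed to
 * TEMP[2]. */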
3431 void
3432 glsl_to_tgsi_visitor::merge_registers(void)
3433 {
3434 int *last_reads = rzalloc_array(mem_ctx, int, this->next_temp);
3435 int *first_writes = rzalloc_array(mem_ctx, int, this->next_temp);
3436 int i, j;
3437
3438 /* Read the indices of the last read and first write to each temp register
3439 * into an array so that we don't have to traverse the instruction list as
3440 * much. */
3441 for (i=0; i < this->next_temp; i++) {
3442 last_reads[i] = get_last_temp_read(i);
3443 first_writes[i] = get_first_temp_write(i);
3444 }
3445
3446 /* Start looking for registers with non-overlapping usages that can be
3447 * merged together. */
3448 for (i=0; i < this->next_temp; i++) {
3449 /* Don't touch unused registers. */
3450 if (last_reads[i] < 0 || first_writes[i] < 0) continue;
3451
3452 for (j=0; j < this->next_temp; j++) {
3453 /* Don't touch unused registers. */
3454 if (last_reads[j] < 0 || first_writes[j] < 0) continue;
3455
3456 /* We can merge the two registers if the first write to j is after or
3457 * in the same instruction as the last read from i. Note that the
3458 * register at index i will always be used earlier or at the same time
3459 * as the register at index j. */
3460 if (first_writes[i] <= first_writes[j] &&
3461 last_reads[i] <= first_writes[j])
3462 {
3463 rename_temp_register(j, i); /* Replace all references to j with i.*/
3464
3465 /* Update the first_writes and last_reads arrays with the new
3466 * values for the merged register index, and mark the newly unused
3467 * register index as such. */
3468 last_reads[i] = last_reads[j];
3469 first_writes[j] = -1;
3470 last_reads[j] = -1;
3471 }
3472 }
3473 }
3474
3475 ralloc_free(last_reads);
3476 ralloc_free(first_writes);
3477 }
3478
3479 /* Reassign indices to temporary registers by reusing unused indices created
3480 * by optimization passes. */
3481 void
3482 glsl_to_tgsi_visitor::renumber_registers(void)
3483 {
3484 int i = 0;
3485 int new_index = 0;
3486
3487 for (i=0; i < this->next_temp; i++) {
3488 if (get_first_temp_read(i) < 0) continue;
3489 if (i != new_index)
3490 rename_temp_register(i, new_index);
3491 new_index++;
3492 }
3493
3494 this->next_temp = new_index;
3495 }
3496
3497 /* ------------------------- TGSI conversion stuff -------------------------- */
3498 struct label {
3499 unsigned branch_target;
3500 unsigned token;
3501 };
3502
3503 /**
3504 * Intermediate state used during shader translation.
3505 */
3506 struct st_translate {
3507 struct ureg_program *ureg;
3508
3509 struct ureg_dst temps[MAX_TEMPS];
3510 struct ureg_src *constants;
3511 struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
3512 struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
3513 struct ureg_dst address[1];
3514 struct ureg_src samplers[PIPE_MAX_SAMPLERS];
3515 struct ureg_src systemValues[SYSTEM_VALUE_MAX];
3516
3517 /* Extra info for handling point size clamping in vertex shader */
3518 struct ureg_dst pointSizeResult; /**< Actual point size output register */
3519 struct ureg_src pointSizeConst; /**< Point size range constant register */
3520 GLint pointSizeOutIndex; /**< Temp point size output register */
3521 GLboolean prevInstWrotePointSize;
3522
3523 const GLuint *inputMapping;
3524 const GLuint *outputMapping;
3525
3526    /* For every instruction that contains a label (e.g. CALL), keep
3527 * details so that we can go back afterwards and emit the correct
3528 * tgsi instruction number for each label.
3529 */
3530 struct label *labels;
3531 unsigned labels_size;
3532 unsigned labels_count;
3533
3534 /* Keep a record of the tgsi instruction number that each mesa
3535     * instruction starts at; this will be used to fix up labels after
3536 * translation.
3537 */
3538 unsigned *insn;
3539 unsigned insn_size;
3540 unsigned insn_count;
3541
3542 unsigned procType; /**< TGSI_PROCESSOR_VERTEX/FRAGMENT */
3543
3544 boolean error;
3545 };
3546
3547 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
3548 static unsigned mesa_sysval_to_semantic[SYSTEM_VALUE_MAX] = {
3549 TGSI_SEMANTIC_FACE,
3550 TGSI_SEMANTIC_INSTANCEID
3551 };
3552
3553 /**
3554 * Make note of a branch to a label in the TGSI code.
3555 * After we've emitted all instructions, we'll go over the list
3556 * of labels built here and patch the TGSI code with the actual
3557 * location of each label.
3558 */
3559 static unsigned *get_label( struct st_translate *t,
3560 unsigned branch_target )
3561 {
3562 unsigned i;
3563
3564 if (t->labels_count + 1 >= t->labels_size) {
3565 t->labels_size = 1 << (util_logbase2(t->labels_size) + 1);
3566 t->labels = (struct label *)realloc(t->labels,
3567 t->labels_size * sizeof t->labels[0]);
3568 if (t->labels == NULL) {
3569 static unsigned dummy;
3570 t->error = TRUE;
3571 return &dummy;
3572 }
3573 }
3574
3575 i = t->labels_count++;
3576 t->labels[i].branch_target = branch_target;
3577 return &t->labels[i].token;
3578 }
3579
3580 /**
3581 * Called prior to emitting the TGSI code for each Mesa instruction.
3582 * Allocate additional space for instructions if needed.
3583 * Update the insn[] array so the next Mesa instruction points to
3584 * the next TGSI instruction.
3585 */
3586 static void set_insn_start( struct st_translate *t,
3587 unsigned start )
3588 {
3589 if (t->insn_count + 1 >= t->insn_size) {
3590 t->insn_size = 1 << (util_logbase2(t->insn_size) + 1);
3591 t->insn = (unsigned *)realloc(t->insn, t->insn_size * sizeof t->insn[0]);
3592 if (t->insn == NULL) {
3593 t->error = TRUE;
3594 return;
3595 }
3596 }
3597
3598 t->insn[t->insn_count++] = start;
3599 }
3600
3601 /**
3602 * Map a Mesa dst register to a TGSI ureg_dst register.
3603 */
3604 static struct ureg_dst
3605 dst_register( struct st_translate *t,
3606 gl_register_file file,
3607 GLuint index )
3608 {
3609 switch( file ) {
3610 case PROGRAM_UNDEFINED:
3611 return ureg_dst_undef();
3612
3613 case PROGRAM_TEMPORARY:
3614 if (ureg_dst_is_undef(t->temps[index]))
3615 t->temps[index] = ureg_DECL_temporary( t->ureg );
3616
3617 return t->temps[index];
3618
3619 case PROGRAM_OUTPUT:
3620 if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ)
3621 t->prevInstWrotePointSize = GL_TRUE;
3622
3623 if (t->procType == TGSI_PROCESSOR_VERTEX)
3624 assert(index < VERT_RESULT_MAX);
3625 else if (t->procType == TGSI_PROCESSOR_FRAGMENT)
3626 assert(index < FRAG_RESULT_MAX);
3627 else
3628 assert(index < GEOM_RESULT_MAX);
3629
3630 assert(t->outputMapping[index] < Elements(t->outputs));
3631
3632 return t->outputs[t->outputMapping[index]];
3633
3634 case PROGRAM_ADDRESS:
3635 return t->address[index];
3636
3637 default:
3638 debug_assert( 0 );
3639 return ureg_dst_undef();
3640 }
3641 }
3642
3643 /**
3644 * Map a Mesa src register to a TGSI ureg_src register.
3645 */
3646 static struct ureg_src
3647 src_register( struct st_translate *t,
3648 gl_register_file file,
3649 GLuint index )
3650 {
3651 switch( file ) {
3652 case PROGRAM_UNDEFINED:
3653 return ureg_src_undef();
3654
3655 case PROGRAM_TEMPORARY:
3656 assert(index >= 0);
3657 assert(index < Elements(t->temps));
3658 if (ureg_dst_is_undef(t->temps[index]))
3659 t->temps[index] = ureg_DECL_temporary( t->ureg );
3660 return ureg_src(t->temps[index]);
3661
3662 case PROGRAM_NAMED_PARAM:
3663 case PROGRAM_ENV_PARAM:
3664 case PROGRAM_LOCAL_PARAM:
3665 case PROGRAM_UNIFORM:
3666 assert(index >= 0);
3667 return t->constants[index];
3668 case PROGRAM_STATE_VAR:
3669 case PROGRAM_CONSTANT: /* ie, immediate */
3670 if (index < 0)
3671 return ureg_DECL_constant( t->ureg, 0 );
3672 else
3673 return t->constants[index];
3674
3675 case PROGRAM_INPUT:
3676 assert(t->inputMapping[index] < Elements(t->inputs));
3677 return t->inputs[t->inputMapping[index]];
3678
3679 case PROGRAM_OUTPUT:
3680 assert(t->outputMapping[index] < Elements(t->outputs));
3681 return ureg_src(t->outputs[t->outputMapping[index]]); /* not needed? */
3682
3683 case PROGRAM_ADDRESS:
3684 return ureg_src(t->address[index]);
3685
3686 case PROGRAM_SYSTEM_VALUE:
3687 assert(index < Elements(t->systemValues));
3688 return t->systemValues[index];
3689
3690 default:
3691 debug_assert( 0 );
3692 return ureg_src_undef();
3693 }
3694 }
3695
3696 /**
3697 * Create a TGSI ureg_dst register from an st_dst_reg.
3698 */
3699 static struct ureg_dst
3700 translate_dst( struct st_translate *t,
3701 const st_dst_reg *dst_reg,
3702 boolean saturate )
3703 {
3704 struct ureg_dst dst = dst_register( t,
3705 dst_reg->file,
3706 dst_reg->index );
3707
3708 dst = ureg_writemask( dst,
3709 dst_reg->writemask );
3710
3711 if (saturate)
3712 dst = ureg_saturate( dst );
3713
3714 if (dst_reg->reladdr != NULL)
3715 dst = ureg_dst_indirect( dst, ureg_src(t->address[0]) );
3716
3717 return dst;
3718 }
3719
3720 /**
3721 * Create a TGSI ureg_src register from an st_src_reg.
3722 */
3723 static struct ureg_src
3724 translate_src( struct st_translate *t,
3725 const st_src_reg *src_reg )
3726 {
3727 struct ureg_src src = src_register( t, src_reg->file, src_reg->index );
3728
3729 src = ureg_swizzle( src,
3730 GET_SWZ( src_reg->swizzle, 0 ) & 0x3,
3731 GET_SWZ( src_reg->swizzle, 1 ) & 0x3,
3732 GET_SWZ( src_reg->swizzle, 2 ) & 0x3,
3733 GET_SWZ( src_reg->swizzle, 3 ) & 0x3);
3734
3735 if ((src_reg->negate & 0xf) == NEGATE_XYZW)
3736 src = ureg_negate(src);
3737
3738 if (src_reg->reladdr != NULL) {
3739 /* Normally ureg_src_indirect() would be used here, but a stupid compiler
3740 * bug in g++ makes ureg_src_indirect (an inline C function) erroneously
3741 * set the bit for src.Negate. So we have to do the operation manually
3742 * here to work around the compiler's problems. */
3743 /*src = ureg_src_indirect(src, ureg_src(t->address[0]));*/
3744 struct ureg_src addr = ureg_src(t->address[0]);
3745 src.Indirect = 1;
3746 src.IndirectFile = addr.File;
3747 src.IndirectIndex = addr.Index;
3748 src.IndirectSwizzle = addr.SwizzleX;
3749
3750 if (src_reg->file != PROGRAM_INPUT &&
3751 src_reg->file != PROGRAM_OUTPUT) {
3752 /* If src_reg->index was negative, it was set to zero in
3753 * src_register(). Reassign it now. But don't do this
3754 * for input/output regs since they get remapped while
3755 * const buffers don't.
3756 */
3757 src.Index = src_reg->index;
3758 }
3759 }
3760
3761 return src;
3762 }
3763
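/**
 * Translate one glsl_to_tgsi_instruction into TGSI via ureg.  Branch/call
 * opcodes go through ureg_label_insn() so their targets can be patched later,
 * texture opcodes get the sampler appended as an extra source, and SCS has
 * its writemask restricted to .xy; everything else is a plain ureg_insn().
 */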
3764 static void
3765 compile_tgsi_instruction(struct st_translate *t,
3766 const struct glsl_to_tgsi_instruction *inst)
3767 {
3768 struct ureg_program *ureg = t->ureg;
3769 GLuint i;
3770 struct ureg_dst dst[1];
3771 struct ureg_src src[4];
3772 unsigned num_dst;
3773 unsigned num_src;
3774
3775 num_dst = num_inst_dst_regs( inst->op );
3776 num_src = num_inst_src_regs( inst->op );
3777
3778 if (num_dst)
3779 dst[0] = translate_dst( t,
3780 &inst->dst,
3781 inst->saturate);
3782
3783 for (i = 0; i < num_src; i++)
3784 src[i] = translate_src( t, &inst->src[i] );
3785
3786 switch( inst->op ) {
3787 case TGSI_OPCODE_BGNLOOP:
3788 case TGSI_OPCODE_CAL:
3789 case TGSI_OPCODE_ELSE:
3790 case TGSI_OPCODE_ENDLOOP:
3791 case TGSI_OPCODE_IF:
3792 debug_assert(num_dst == 0);
3793 ureg_label_insn( ureg,
3794 inst->op,
3795 src, num_src,
3796 get_label( t,
3797 inst->op == TGSI_OPCODE_CAL ? inst->function->sig_id : 0 ));
3798 return;
3799
3800 case TGSI_OPCODE_TEX:
3801 case TGSI_OPCODE_TXB:
3802 case TGSI_OPCODE_TXD:
3803 case TGSI_OPCODE_TXL:
3804 case TGSI_OPCODE_TXP:
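      /* TGSI texture instructions take the sampler as an extra, final source
       * operand, so append it before emitting.
       */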
3805 src[num_src++] = t->samplers[inst->sampler];
3806 ureg_tex_insn( ureg,
3807 inst->op,
3808 dst, num_dst,
3809 translate_texture_target( inst->tex_target,
3810 inst->tex_shadow ),
3811 src, num_src );
3812 return;
3813
3814 case TGSI_OPCODE_SCS:
3815 dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XY );
3816 ureg_insn( ureg,
3817 inst->op,
3818 dst, num_dst,
3819 src, num_src );
3820 break;
3821
3822 default:
3823 ureg_insn( ureg,
3824 inst->op,
3825 dst, num_dst,
3826 src, num_src );
3827 break;
3828 }
3829 }
3830
3831 /**
3832 * Emit the TGSI instructions to adjust the WPOS pixel center convention
3833  * Emit the TGSI instructions to adjust the WPOS pixel center convention.
3834 */
3835 static void
3836 emit_adjusted_wpos( struct st_translate *t,
3837 const struct gl_program *program,
3838 GLfloat adjX, GLfloat adjY)
3839 {
3840 struct ureg_program *ureg = t->ureg;
3841 struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
3842 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
3843
3844 /* Note that we bias X and Y and pass Z and W through unchanged.
3845 * The shader might also use gl_FragCoord.w and .z.
3846 */
3847 ureg_ADD(ureg, wpos_temp, wpos_input,
3848 ureg_imm4f(ureg, adjX, adjY, 0.0f, 0.0f));
3849
3850 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
3851 }
3852
3853
3854 /**
3855 * Emit the TGSI instructions for inverting the WPOS y coordinate.
3856  * This code is unavoidable because the transform also depends on whether
3857  * an FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
3858 */
3859 static void
3860 emit_wpos_inversion( struct st_translate *t,
3861 const struct gl_program *program,
3862 boolean invert)
3863 {
3864 struct ureg_program *ureg = t->ureg;
3865
3866 /* Fragment program uses fragment position input.
3867 * Need to replace instances of INPUT[WPOS] with temp T
3868  * where T = INPUT[WPOS], but with the y coordinate inverted.
3869 */
3870 static const gl_state_index wposTransformState[STATE_LENGTH]
3871 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM,
3872 (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
3873
3874 /* XXX: note we are modifying the incoming shader here! Need to
3875 * do this before emitting the constant decls below, or this
3876 * will be missed:
3877 */
3878 unsigned wposTransConst = _mesa_add_state_reference(program->Parameters,
3879 wposTransformState);
3880
3881 struct ureg_src wpostrans = ureg_DECL_constant( ureg, wposTransConst );
3882 struct ureg_dst wpos_temp;
3883 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
3884
3885 /* MOV wpos_temp, input[wpos]
3886 */
3887 if (wpos_input.File == TGSI_FILE_TEMPORARY)
3888 wpos_temp = ureg_dst(wpos_input);
3889 else {
3890 wpos_temp = ureg_DECL_temporary( ureg );
3891 ureg_MOV( ureg, wpos_temp, wpos_input );
3892 }
3893
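   /* wpostrans carries two (scale, bias) pairs: .xy is used when this
    * translation must flip Y, .zw otherwise; the actual values come from
    * STATE_FB_WPOS_Y_TRANSFORM and account for whether an FBO is bound.
    * Each MAD below computes y' = y * scale + bias.
    */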
3894 if (invert) {
3895 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
3896 */
3897 ureg_MAD( ureg,
3898 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
3899 wpos_input,
3900 ureg_scalar(wpostrans, 0),
3901 ureg_scalar(wpostrans, 1));
3902 } else {
3903 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
3904 */
3905 ureg_MAD( ureg,
3906 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
3907 wpos_input,
3908 ureg_scalar(wpostrans, 2),
3909 ureg_scalar(wpostrans, 3));
3910 }
3911
3912 /* Use wpos_temp as position input from here on:
3913 */
3914 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
3915 }
3916
3917
3918 /**
3919  * Emit fragment position/coordinate code.
3920 */
3921 static void
3922 emit_wpos(struct st_context *st,
3923 struct st_translate *t,
3924 const struct gl_program *program,
3925 struct ureg_program *ureg)
3926 {
3927 const struct gl_fragment_program *fp =
3928 (const struct gl_fragment_program *) program;
3929 struct pipe_screen *pscreen = st->pipe->screen;
3930 boolean invert = FALSE;
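   /* Reconcile the convention the fragment shader was written for with what
    * the driver can support natively, flipping Y and/or applying a half-pixel
    * bias where the two disagree.
    */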
3931
3932 if (fp->OriginUpperLeft) {
3933 /* Fragment shader wants origin in upper-left */
3934 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
3935 /* the driver supports upper-left origin */
3936 }
3937 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
3938 /* the driver supports lower-left origin, need to invert Y */
3939 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
3940 invert = TRUE;
3941 }
3942 else
3943 assert(0);
3944 }
3945 else {
3946 /* Fragment shader wants origin in lower-left */
3947 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
3948 /* the driver supports lower-left origin */
3949 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
3950 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
3951 /* the driver supports upper-left origin, need to invert Y */
3952 invert = TRUE;
3953 else
3954 assert(0);
3955 }
3956
3957 if (fp->PixelCenterInteger) {
3958 /* Fragment shader wants pixel center integer */
3959 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER))
3960 /* the driver supports pixel center integer */
3961 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
3962 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER))
3963 /* the driver supports pixel center half integer, need to bias X,Y */
3964 emit_adjusted_wpos(t, program, 0.5f, invert ? 0.5f : -0.5f);
3965 else
3966 assert(0);
3967 }
3968 else {
3969 /* Fragment shader wants pixel center half integer */
3970 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
3971 /* the driver supports pixel center half integer */
3972 }
3973 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
3974 /* the driver supports pixel center integer, need to bias X,Y */
3975 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
3976 emit_adjusted_wpos(t, program, 0.5f, invert ? -0.5f : 0.5f);
3977 }
3978 else
3979 assert(0);
3980 }
3981
3982    /* We invert after the adjustment so that we avoid the MOV to a temporary
3983     * and reuse the adjustment ADD instead. */
3984 emit_wpos_inversion(t, program, invert);
3985 }
3986
3987 /**
3988 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
3989 * TGSI uses +1 for front, -1 for back.
3990 * This function converts the TGSI value to the GL value. Simply clamping/
3991 * saturating the value to [0,1] does the job.
3992 */
3993 static void
3994 emit_face_var(struct st_translate *t)
3995 {
3996 struct ureg_program *ureg = t->ureg;
3997 struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
3998 struct ureg_src face_input = t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]];
3999
4000 /* MOV_SAT face_temp, input[face] */
4001 face_temp = ureg_saturate(face_temp);
4002 ureg_MOV(ureg, face_temp, face_input);
4003
4004 /* Use face_temp as face input from here on: */
4005 t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]] = ureg_src(face_temp);
4006 }
4007
4008 static void
4009 emit_edgeflags(struct st_translate *t)
4010 {
4011 struct ureg_program *ureg = t->ureg;
4012 struct ureg_dst edge_dst = t->outputs[t->outputMapping[VERT_RESULT_EDGE]];
4013 struct ureg_src edge_src = t->inputs[t->inputMapping[VERT_ATTRIB_EDGEFLAG]];
4014
4015 ureg_MOV(ureg, edge_dst, edge_src);
4016 }
4017
4018 /**
4019 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
4020 * \param program the program to translate
4021 * \param numInputs number of input registers used
4022 * \param inputMapping maps Mesa fragment program inputs to TGSI generic
4023 * input indexes
4024 * \param inputSemanticName the TGSI_SEMANTIC flag for each input
4025 * \param inputSemanticIndex the semantic index (ex: which texcoord) for
4026 * each input
4027 * \param interpMode the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
4028 * \param numOutputs number of output registers used
4029 * \param outputMapping maps Mesa fragment program outputs to TGSI
4030 * generic outputs
4031 * \param outputSemanticName the TGSI_SEMANTIC flag for each output
4032 * \param outputSemanticIndex the semantic index (ex: which texcoord) for
4033 * each output
4034 *
4035  * \return PIPE_OK, PIPE_ERROR_OUT_OF_MEMORY or PIPE_ERROR_BAD_INPUT
4036 */
4037 extern "C" enum pipe_error
4038 st_translate_program(
4039 struct gl_context *ctx,
4040 uint procType,
4041 struct ureg_program *ureg,
4042 glsl_to_tgsi_visitor *program,
4043 const struct gl_program *proginfo,
4044 GLuint numInputs,
4045 const GLuint inputMapping[],
4046 const ubyte inputSemanticName[],
4047 const ubyte inputSemanticIndex[],
4048 const GLuint interpMode[],
4049 GLuint numOutputs,
4050 const GLuint outputMapping[],
4051 const ubyte outputSemanticName[],
4052 const ubyte outputSemanticIndex[],
4053 boolean passthrough_edgeflags )
4054 {
4055 struct st_translate translate, *t;
4056 unsigned i;
4057 enum pipe_error ret = PIPE_OK;
4058
4059 assert(numInputs <= Elements(t->inputs));
4060 assert(numOutputs <= Elements(t->outputs));
4061
4062 t = &translate;
4063 memset(t, 0, sizeof *t);
4064
4065 t->procType = procType;
4066 t->inputMapping = inputMapping;
4067 t->outputMapping = outputMapping;
4068 t->ureg = ureg;
4069 t->pointSizeOutIndex = -1;
4070 t->prevInstWrotePointSize = GL_FALSE;
4071
4072 /*
4073 * Declare input attributes.
4074 */
4075 if (procType == TGSI_PROCESSOR_FRAGMENT) {
4076 for (i = 0; i < numInputs; i++) {
4077 t->inputs[i] = ureg_DECL_fs_input(ureg,
4078 inputSemanticName[i],
4079 inputSemanticIndex[i],
4080 interpMode[i]);
4081 }
4082
4083 if (proginfo->InputsRead & FRAG_BIT_WPOS) {
4084 /* Must do this after setting up t->inputs, and before
4085 * emitting constant references, below:
4086 */
4087 emit_wpos(st_context(ctx), t, proginfo, ureg);
4088 }
4089
4090 if (proginfo->InputsRead & FRAG_BIT_FACE)
4091 emit_face_var(t);
4092
4093 /*
4094 * Declare output attributes.
4095 */
4096 for (i = 0; i < numOutputs; i++) {
4097 switch (outputSemanticName[i]) {
4098 case TGSI_SEMANTIC_POSITION:
4099 t->outputs[i] = ureg_DECL_output( ureg,
4100 TGSI_SEMANTIC_POSITION, /* Z / Depth */
4101 outputSemanticIndex[i] );
4102
4103 t->outputs[i] = ureg_writemask( t->outputs[i],
4104 TGSI_WRITEMASK_Z );
4105 break;
4106 case TGSI_SEMANTIC_STENCIL:
4107 t->outputs[i] = ureg_DECL_output( ureg,
4108 TGSI_SEMANTIC_STENCIL, /* Stencil */
4109 outputSemanticIndex[i] );
4110 t->outputs[i] = ureg_writemask( t->outputs[i],
4111 TGSI_WRITEMASK_Y );
4112 break;
4113 case TGSI_SEMANTIC_COLOR:
4114 t->outputs[i] = ureg_DECL_output( ureg,
4115 TGSI_SEMANTIC_COLOR,
4116 outputSemanticIndex[i] );
4117 break;
4118 default:
4119 debug_assert(0);
4120 return PIPE_ERROR_BAD_INPUT;
4121 }
4122 }
4123 }
4124 else if (procType == TGSI_PROCESSOR_GEOMETRY) {
4125 for (i = 0; i < numInputs; i++) {
4126 t->inputs[i] = ureg_DECL_gs_input(ureg,
4127 i,
4128 inputSemanticName[i],
4129 inputSemanticIndex[i]);
4130 }
4131
4132 for (i = 0; i < numOutputs; i++) {
4133 t->outputs[i] = ureg_DECL_output( ureg,
4134 outputSemanticName[i],
4135 outputSemanticIndex[i] );
4136 }
4137 }
4138 else {
4139 assert(procType == TGSI_PROCESSOR_VERTEX);
4140
4141 for (i = 0; i < numInputs; i++) {
4142 t->inputs[i] = ureg_DECL_vs_input(ureg, i);
4143 }
4144
4145 for (i = 0; i < numOutputs; i++) {
4146 t->outputs[i] = ureg_DECL_output( ureg,
4147 outputSemanticName[i],
4148 outputSemanticIndex[i] );
4149 if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
4150 /* Writing to the point size result register requires special
4151 * handling to implement clamping.
4152 */
4153 static const gl_state_index pointSizeClampState[STATE_LENGTH]
4154 = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
4155 /* XXX: note we are modifying the incoming shader here! Need to
4156 * do this before emitting the constant decls below, or this
4157 * will be missed.
4158 */
4159 unsigned pointSizeClampConst =
4160 _mesa_add_state_reference(proginfo->Parameters,
4161 pointSizeClampState);
4162 struct ureg_dst psizregtemp = ureg_DECL_temporary( ureg );
4163 t->pointSizeConst = ureg_DECL_constant( ureg, pointSizeClampConst );
4164 t->pointSizeResult = t->outputs[i];
4165 t->pointSizeOutIndex = i;
4166 t->outputs[i] = psizregtemp;
4167 }
4168 }
4169 if (passthrough_edgeflags)
4170 emit_edgeflags(t);
4171 }
4172
4173 /* Declare address register.
4174 */
4175 if (program->num_address_regs > 0) {
4176 debug_assert( program->num_address_regs == 1 );
4177 t->address[0] = ureg_DECL_address( ureg );
4178 }
4179
4180 /* Declare misc input registers
4181 */
4182 {
4183 GLbitfield sysInputs = proginfo->SystemValuesRead;
4184 unsigned numSys = 0;
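      /* Walk the SystemValuesRead bitfield and declare one TGSI system value
       * per bit set; mesa_sysval_to_semantic[] maps each Mesa SYSTEM_VALUE_*
       * index to its TGSI semantic.
       */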
4185 for (i = 0; sysInputs; i++) {
4186 if (sysInputs & (1 << i)) {
4187 unsigned semName = mesa_sysval_to_semantic[i];
4188 t->systemValues[i] = ureg_DECL_system_value(ureg, numSys, semName, 0);
4189 numSys++;
4190 sysInputs &= ~(1 << i);
4191 }
4192 }
4193 }
4194
4195 if (program->indirect_addr_temps) {
4196 /* If temps are accessed with indirect addressing, declare temporaries
4197       * in sequential order.  Otherwise, we declare them on demand elsewhere.
4198 * (Note: the number of temporaries is equal to program->next_temp)
4199 */
4200 for (i = 0; i < (unsigned)program->next_temp; i++) {
4201 /* XXX use TGSI_FILE_TEMPORARY_ARRAY when it's supported by ureg */
4202 t->temps[i] = ureg_DECL_temporary( t->ureg );
4203 }
4204 }
4205
4206 /* Emit constants and immediates. Mesa uses a single index space
4207 * for these, so we put all the translated regs in t->constants.
4208 * XXX: this entire if block depends on proginfo->Parameters from Mesa IR
4209 */
4210 if (proginfo->Parameters) {
4211 t->constants = (struct ureg_src *)CALLOC( proginfo->Parameters->NumParameters * sizeof t->constants[0] );
4212 if (t->constants == NULL) {
4213 ret = PIPE_ERROR_OUT_OF_MEMORY;
4214 goto out;
4215 }
4216
4217 for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
4218 switch (proginfo->Parameters->Parameters[i].Type) {
4219 case PROGRAM_ENV_PARAM:
4220 case PROGRAM_LOCAL_PARAM:
4221 case PROGRAM_STATE_VAR:
4222 case PROGRAM_NAMED_PARAM:
4223 case PROGRAM_UNIFORM:
4224 t->constants[i] = ureg_DECL_constant( ureg, i );
4225 break;
4226
4227 /* Emit immediates only when there's no indirect addressing of
4228 * the const buffer.
4229 * FIXME: Be smarter and recognize param arrays:
4230 * indirect addressing is only valid within the referenced
4231 * array.
4232 */
4233 case PROGRAM_CONSTANT:
4234 if (program->indirect_addr_consts)
4235 t->constants[i] = ureg_DECL_constant( ureg, i );
4236 else
4237 switch(proginfo->Parameters->Parameters[i].DataType)
4238 {
4239 case GL_FLOAT:
4240 case GL_FLOAT_VEC2:
4241 case GL_FLOAT_VEC3:
4242 case GL_FLOAT_VEC4:
4243 t->constants[i] = ureg_DECL_immediate(ureg, (float *)proginfo->Parameters->ParameterValues[i], 4);
4244 break;
4245 case GL_INT:
4246 case GL_INT_VEC2:
4247 case GL_INT_VEC3:
4248 case GL_INT_VEC4:
4249 t->constants[i] = ureg_DECL_immediate_int(ureg, (int *)proginfo->Parameters->ParameterValues[i], 4);
4250 break;
4251 case GL_UNSIGNED_INT:
4252 case GL_UNSIGNED_INT_VEC2:
4253 case GL_UNSIGNED_INT_VEC3:
4254 case GL_UNSIGNED_INT_VEC4:
4255 case GL_BOOL:
4256 case GL_BOOL_VEC2:
4257 case GL_BOOL_VEC3:
4258 case GL_BOOL_VEC4:
4259 t->constants[i] = ureg_DECL_immediate_uint(ureg, (unsigned *)proginfo->Parameters->ParameterValues[i], 4);
4260 break;
4261 default:
4262 assert(!"should not get here");
4263 }
4264 break;
4265 default:
4266 break;
4267 }
4268 }
4269 }
4270
4271 /* texture samplers */
4272 for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
4273 if (program->samplers_used & (1 << i)) {
4274 t->samplers[i] = ureg_DECL_sampler( ureg, i );
4275 }
4276 }
4277
4278 /* Emit each instruction in turn:
4279 */
4280 foreach_iter(exec_list_iterator, iter, program->instructions) {
4281 set_insn_start( t, ureg_get_instruction_number( ureg ));
4282 compile_tgsi_instruction( t, (glsl_to_tgsi_instruction *)iter.get() );
4283
4284 if (t->prevInstWrotePointSize && proginfo->Id) {
4285 /* The previous instruction wrote to the (fake) vertex point size
4286 * result register. Now we need to clamp that value to the min/max
4287 * point size range, putting the result into the real point size
4288 * register.
4289       * Note that we can't do this easily at the end of the program due to
4290       * a possible early return.
4291 */
4292 set_insn_start( t, ureg_get_instruction_number( ureg ));
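         /* Clamp: psize = MIN(MAX(psize, pointSizeConst.y), pointSizeConst.z);
          * the .y/.z components presumably hold the implementation's minimum
          * and maximum point sizes (STATE_POINT_SIZE_IMPL_CLAMP).
          */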
4293 ureg_MAX( t->ureg,
4294 ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
4295 ureg_src(t->outputs[t->pointSizeOutIndex]),
4296 ureg_swizzle(t->pointSizeConst, 1,1,1,1));
4297 ureg_MIN( t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
4298 ureg_src(t->outputs[t->pointSizeOutIndex]),
4299 ureg_swizzle(t->pointSizeConst, 2,2,2,2));
4300 }
4301 t->prevInstWrotePointSize = GL_FALSE;
4302 }
4303
4304 /* Fix up all emitted labels:
4305 */
4306 for (i = 0; i < t->labels_count; i++) {
4307 ureg_fixup_label( ureg,
4308 t->labels[i].token,
4309 t->insn[t->labels[i].branch_target] );
4310 }
4311
4312 out:
4313 FREE(t->insn);
4314 FREE(t->labels);
4315 FREE(t->constants);
4316
4317 if (t->error) {
4318 debug_printf("%s: translate error flag set\n", __FUNCTION__);
4319 }
4320
4321 return ret;
4322 }
4323 /* ----------------------------- End TGSI code ------------------------------ */
4324
4325 /**
4326  * Convert a shader's GLSL IR into a Mesa gl_program, but without
4327 * generating Mesa IR.
4328 */
4329 static struct gl_program *
4330 get_mesa_program(struct gl_context *ctx,
4331 struct gl_shader_program *shader_program,
4332 struct gl_shader *shader)
4333 {
4334 glsl_to_tgsi_visitor* v = new glsl_to_tgsi_visitor();
4335 struct gl_program *prog;
4336 GLenum target;
4337 const char *target_string;
4338 GLboolean progress;
4339 struct gl_shader_compiler_options *options =
4340 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)];
4341
4342 switch (shader->Type) {
4343 case GL_VERTEX_SHADER:
4344 target = GL_VERTEX_PROGRAM_ARB;
4345 target_string = "vertex";
4346 break;
4347 case GL_FRAGMENT_SHADER:
4348 target = GL_FRAGMENT_PROGRAM_ARB;
4349 target_string = "fragment";
4350 break;
4351 case GL_GEOMETRY_SHADER:
4352 target = GL_GEOMETRY_PROGRAM_NV;
4353 target_string = "geometry";
4354 break;
4355 default:
4356 assert(!"should not be reached");
4357 return NULL;
4358 }
4359
4360 validate_ir_tree(shader->ir);
4361
4362 prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name);
4363 if (!prog)
4364 return NULL;
4365 prog->Parameters = _mesa_new_parameter_list();
4366 prog->Varying = _mesa_new_parameter_list();
4367 prog->Attributes = _mesa_new_parameter_list();
4368 v->ctx = ctx;
4369 v->prog = prog;
4370 v->shader_program = shader_program;
4371 v->options = options;
4372 v->glsl_version = ctx->Const.GLSLVersion;
4373
4374 add_uniforms_to_parameters_list(shader_program, shader, prog);
4375
4376 /* Emit intermediate IR for main(). */
4377 visit_exec_list(shader->ir, v);
4378
4379 /* Now emit bodies for any functions that were used. */
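   /* Visiting one body may reference additional functions, so keep iterating
    * until no new bodies are emitted.
    */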
4380 do {
4381 progress = GL_FALSE;
4382
4383 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
4384 function_entry *entry = (function_entry *)iter.get();
4385
4386 if (!entry->bgn_inst) {
4387 v->current_function = entry;
4388
4389 entry->bgn_inst = v->emit(NULL, TGSI_OPCODE_BGNSUB);
4390 entry->bgn_inst->function = entry;
4391
4392 visit_exec_list(&entry->sig->body, v);
4393
4394 glsl_to_tgsi_instruction *last;
4395 last = (glsl_to_tgsi_instruction *)v->instructions.get_tail();
4396 if (last->op != TGSI_OPCODE_RET)
4397 v->emit(NULL, TGSI_OPCODE_RET);
4398
4399 glsl_to_tgsi_instruction *end;
4400 end = v->emit(NULL, TGSI_OPCODE_ENDSUB);
4401 end->function = entry;
4402
4403 progress = GL_TRUE;
4404 }
4405 }
4406 } while (progress);
4407
4408 #if 0
4409 /* Print out some information (for debugging purposes) used by the
4410 * optimization passes. */
4411    for (int i = 0; i < v->next_temp; i++) {
4412 int fr = v->get_first_temp_read(i);
4413 int fw = v->get_first_temp_write(i);
4414 int lr = v->get_last_temp_read(i);
4415 int lw = v->get_last_temp_write(i);
4416
4417 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, fr, fw, lr, lw);
4418 assert(fw <= fr);
4419 }
4420 #endif
4421
4422    /* Remove reads from output registers, and from varyings in vertex shaders. */
4423 v->remove_output_reads(PROGRAM_OUTPUT);
4424 if (target == GL_VERTEX_PROGRAM_ARB)
4425 v->remove_output_reads(PROGRAM_VARYING);
4426
4427 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
4428 v->simplify_cmp();
4429 v->copy_propagate();
4430 while (v->eliminate_dead_code_advanced());
4431
4432 /* FIXME: These passes to optimize temporary registers don't work when there
4433 * is indirect addressing of the temporary register space. We need proper
4434 * array support so that we don't have to give up these passes in every
4435 * shader that uses arrays.
4436 */
4437 if (!v->indirect_addr_temps) {
4438 v->eliminate_dead_code();
4439 v->merge_registers();
4440 v->renumber_registers();
4441 }
4442
4443 /* Write the END instruction. */
4444 v->emit(NULL, TGSI_OPCODE_END);
4445
4446 if (ctx->Shader.Flags & GLSL_DUMP) {
4447 printf("\n");
4448 printf("GLSL IR for linked %s program %d:\n", target_string,
4449 shader_program->Name);
4450 _mesa_print_ir(shader->ir, NULL);
4451 printf("\n");
4452 printf("\n");
4453 }
4454
4455 prog->Instructions = NULL;
4456 prog->NumInstructions = 0;
4457
4458 do_set_program_inouts(shader->ir, prog);
4459 count_resources(v, prog);
4460
4461 check_resources(ctx, shader_program, v, prog);
4462
4463 _mesa_reference_program(ctx, &shader->Program, prog);
4464
4465 struct st_vertex_program *stvp;
4466 struct st_fragment_program *stfp;
4467 struct st_geometry_program *stgp;
4468
4469 switch (shader->Type) {
4470 case GL_VERTEX_SHADER:
4471 stvp = (struct st_vertex_program *)prog;
4472 stvp->glsl_to_tgsi = v;
4473 break;
4474 case GL_FRAGMENT_SHADER:
4475 stfp = (struct st_fragment_program *)prog;
4476 stfp->glsl_to_tgsi = v;
4477 break;
4478 case GL_GEOMETRY_SHADER:
4479 stgp = (struct st_geometry_program *)prog;
4480 stgp->glsl_to_tgsi = v;
4481 break;
4482 default:
4483 assert(!"should not be reached");
4484 return NULL;
4485 }
4486
4487 return prog;
4488 }
4489
4490 extern "C" {
4491
4492 struct gl_shader *
4493 st_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
4494 {
4495 struct gl_shader *shader;
4496 assert(type == GL_FRAGMENT_SHADER || type == GL_VERTEX_SHADER ||
4497 type == GL_GEOMETRY_SHADER_ARB);
4498 shader = rzalloc(NULL, struct gl_shader);
4499 if (shader) {
4500 shader->Type = type;
4501 shader->Name = name;
4502 _mesa_init_shader(ctx, shader);
4503 }
4504 return shader;
4505 }
4506
4507 struct gl_shader_program *
4508 st_new_shader_program(struct gl_context *ctx, GLuint name)
4509 {
4510 struct gl_shader_program *shProg;
4511 shProg = rzalloc(NULL, struct gl_shader_program);
4512 if (shProg) {
4513 shProg->Name = name;
4514 _mesa_init_shader_program(ctx, shProg);
4515 }
4516 return shProg;
4517 }
4518
4519 /**
4520 * Link a shader.
4521 * Called via ctx->Driver.LinkShader()
4522 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
4523 * with code lowering and other optimizations.
4524 */
4525 GLboolean
4526 st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4527 {
4528 assert(prog->LinkStatus);
4529
4530 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4531 if (prog->_LinkedShaders[i] == NULL)
4532 continue;
4533
4534 bool progress;
4535 exec_list *ir = prog->_LinkedShaders[i]->ir;
4536 const struct gl_shader_compiler_options *options =
4537 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)];
4538
4539 do {
4540 progress = false;
4541
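         /* Run the lowering and optimization passes to a fixed point; one
          * pass may expose new opportunities for the others.
          */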
4542 /* Lowering */
4543 do_mat_op_to_vec(ir);
4544 lower_instructions(ir, (MOD_TO_FRACT | DIV_TO_MUL_RCP | EXP_TO_EXP2
4545 | LOG_TO_LOG2
4546 | ((options->EmitNoPow) ? POW_TO_EXP2 : 0)));
4547
4548 progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress;
4549
4550 progress = do_common_optimization(ir, true, options->MaxUnrollIterations) || progress;
4551
4552 progress = lower_quadop_vector(ir, true) || progress;
4553
4554 if (options->EmitNoIfs) {
4555 progress = lower_discard(ir) || progress;
4556 progress = lower_if_to_cond_assign(ir) || progress;
4557 }
4558
4559 if (options->EmitNoNoise)
4560 progress = lower_noise(ir) || progress;
4561
4562 /* If there are forms of indirect addressing that the driver
4563 * cannot handle, perform the lowering pass.
4564 */
4565 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput
4566 || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform)
4567 progress =
4568 lower_variable_index_to_cond_assign(ir,
4569 options->EmitNoIndirectInput,
4570 options->EmitNoIndirectOutput,
4571 options->EmitNoIndirectTemp,
4572 options->EmitNoIndirectUniform)
4573 || progress;
4574
4575 progress = do_vec_index_to_cond_assign(ir) || progress;
4576 } while (progress);
4577
4578 validate_ir_tree(ir);
4579 }
4580
4581 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4582 struct gl_program *linked_prog;
4583
4584 if (prog->_LinkedShaders[i] == NULL)
4585 continue;
4586
4587 linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i]);
4588
4589 if (linked_prog) {
4590 bool ok = true;
4591
4592 switch (prog->_LinkedShaders[i]->Type) {
4593 case GL_VERTEX_SHADER:
4594 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
4595 (struct gl_vertex_program *)linked_prog);
4596 ok = ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
4597 linked_prog);
4598 break;
4599 case GL_FRAGMENT_SHADER:
4600 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
4601 (struct gl_fragment_program *)linked_prog);
4602 ok = ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
4603 linked_prog);
4604 break;
4605 case GL_GEOMETRY_SHADER:
4606 _mesa_reference_geomprog(ctx, &prog->GeometryProgram,
4607 (struct gl_geometry_program *)linked_prog);
4608 ok = ctx->Driver.ProgramStringNotify(ctx, GL_GEOMETRY_PROGRAM_NV,
4609 linked_prog);
4610 break;
4611 }
4612 if (!ok) {
4613 return GL_FALSE;
4614 }
4615 }
4616
4617 _mesa_reference_program(ctx, &linked_prog, NULL);
4618 }
4619
4620 return GL_TRUE;
4621 }
4622
4623
4624 /**
4625 * Link a GLSL shader program. Called via glLinkProgram().
4626 */
4627 void
4628 st_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4629 {
4630 unsigned int i;
4631
4632 _mesa_clear_shader_program_data(ctx, prog);
4633
4634 prog->LinkStatus = GL_TRUE;
4635
4636 for (i = 0; i < prog->NumShaders; i++) {
4637 if (!prog->Shaders[i]->CompileStatus) {
4638 fail_link(prog, "linking with uncompiled shader");
4639 prog->LinkStatus = GL_FALSE;
4640 }
4641 }
4642
4643 prog->Varying = _mesa_new_parameter_list();
4644 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
4645 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
4646 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, NULL);
4647
4648 if (prog->LinkStatus) {
4649 link_shaders(ctx, prog);
4650 }
4651
4652 if (prog->LinkStatus) {
4653 if (!ctx->Driver.LinkShader(ctx, prog)) {
4654 prog->LinkStatus = GL_FALSE;
4655 }
4656 }
4657
4658 set_uniform_initializers(ctx, prog);
4659
4660 if (ctx->Shader.Flags & GLSL_DUMP) {
4661 if (!prog->LinkStatus) {
4662 printf("GLSL shader program %d failed to link\n", prog->Name);
4663 }
4664
4665 if (prog->InfoLog && prog->InfoLog[0] != 0) {
4666 printf("GLSL shader program %d info log:\n", prog->Name);
4667 printf("%s\n", prog->InfoLog);
4668 }
4669 }
4670 }
4671
4672 } /* extern "C" */