mesa, glsl_to_tgsi: Add new gl_context::NativeIntegers flag.
[mesa.git] / src / mesa / state_tracker / st_glsl_to_tgsi.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27 /**
28 * \file glsl_to_tgsi.cpp
29 *
30 * Translate GLSL IR to TGSI.
31 */
32
33 #include <stdio.h>
34 #include "main/compiler.h"
35 #include "ir.h"
36 #include "ir_visitor.h"
37 #include "ir_print_visitor.h"
38 #include "ir_expression_flattening.h"
39 #include "glsl_types.h"
40 #include "glsl_parser_extras.h"
41 #include "../glsl/program.h"
42 #include "ir_optimization.h"
43 #include "ast.h"
44
45 extern "C" {
46 #include "main/mtypes.h"
47 #include "main/shaderapi.h"
48 #include "main/shaderobj.h"
49 #include "main/uniforms.h"
50 #include "program/hash_table.h"
51 #include "program/prog_instruction.h"
52 #include "program/prog_optimize.h"
53 #include "program/prog_print.h"
54 #include "program/program.h"
55 #include "program/prog_uniform.h"
56 #include "program/prog_parameter.h"
57 #include "program/sampler.h"
58
59 #include "pipe/p_compiler.h"
60 #include "pipe/p_context.h"
61 #include "pipe/p_screen.h"
62 #include "pipe/p_shader_tokens.h"
63 #include "pipe/p_state.h"
64 #include "util/u_math.h"
65 #include "tgsi/tgsi_ureg.h"
66 #include "tgsi/tgsi_info.h"
67 #include "st_context.h"
68 #include "st_program.h"
69 #include "st_glsl_to_tgsi.h"
70 #include "st_mesa_to_tgsi.h"
71 }
72
73 #define PROGRAM_IMMEDIATE PROGRAM_FILE_MAX
74 #define PROGRAM_ANY_CONST ((1 << PROGRAM_LOCAL_PARAM) | \
75 (1 << PROGRAM_ENV_PARAM) | \
76 (1 << PROGRAM_STATE_VAR) | \
77 (1 << PROGRAM_NAMED_PARAM) | \
78 (1 << PROGRAM_CONSTANT) | \
79 (1 << PROGRAM_UNIFORM))
80
81 #define MAX_TEMPS 4096
82
83 class st_src_reg;
84 class st_dst_reg;
85
86 static int swizzle_for_size(int size);
87
88 /**
89 * This class is the glsl_to_tgsi counterpart of TGSI's ureg_src.
90 */
91 class st_src_reg {
92 public:
93 st_src_reg(gl_register_file file, int index, const glsl_type *type)
94 {
95 this->file = file;
96 this->index = index;
97 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
98 this->swizzle = swizzle_for_size(type->vector_elements);
99 else
100 this->swizzle = SWIZZLE_XYZW;
101 this->negate = 0;
102 this->type = type ? type->base_type : GLSL_TYPE_ERROR;
103 this->reladdr = NULL;
104 }
105
106 st_src_reg(gl_register_file file, int index, int type)
107 {
108 this->type = type;
109 this->file = file;
110 this->index = index;
111 this->swizzle = SWIZZLE_XYZW;
112 this->negate = 0;
113 this->reladdr = NULL;
114 }
115
116 st_src_reg()
117 {
118 this->type = GLSL_TYPE_ERROR;
119 this->file = PROGRAM_UNDEFINED;
120 this->index = 0;
121 this->swizzle = 0;
122 this->negate = 0;
123 this->reladdr = NULL;
124 }
125
126 explicit st_src_reg(st_dst_reg reg);
127
128 gl_register_file file; /**< PROGRAM_* from Mesa */
129 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
130 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
131 int negate; /**< NEGATE_XYZW mask from mesa */
132 int type; /**< GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
133 /** Register index should be offset by the integer in this reg. */
134 st_src_reg *reladdr;
135 };
136
137 class st_dst_reg {
138 public:
139 st_dst_reg(gl_register_file file, int writemask, int type)
140 {
141 this->file = file;
142 this->index = 0;
143 this->writemask = writemask;
144 this->cond_mask = COND_TR;
145 this->reladdr = NULL;
146 this->type = type;
147 }
148
149 st_dst_reg()
150 {
151 this->type = GLSL_TYPE_ERROR;
152 this->file = PROGRAM_UNDEFINED;
153 this->index = 0;
154 this->writemask = 0;
155 this->cond_mask = COND_TR;
156 this->reladdr = NULL;
157 }
158
159 explicit st_dst_reg(st_src_reg reg);
160
161 gl_register_file file; /**< PROGRAM_* from Mesa */
162 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
163 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
164 GLuint cond_mask:4;
165 int type; /**< GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */
166 /** Register index should be offset by the integer in this reg. */
167 st_src_reg *reladdr;
168 };
169
170 st_src_reg::st_src_reg(st_dst_reg reg)
171 {
172 this->type = reg.type;
173 this->file = reg.file;
174 this->index = reg.index;
175 this->swizzle = SWIZZLE_XYZW;
176 this->negate = 0;
177 this->reladdr = reg.reladdr;
178 }
179
180 st_dst_reg::st_dst_reg(st_src_reg reg)
181 {
182 this->type = reg.type;
183 this->file = reg.file;
184 this->index = reg.index;
185 this->writemask = WRITEMASK_XYZW;
186 this->cond_mask = COND_TR;
187 this->reladdr = reg.reladdr;
188 }
189
190 class glsl_to_tgsi_instruction : public exec_node {
191 public:
192 /* Callers of this ralloc-based new need not call delete. It's
193 * easier to just ralloc_free 'ctx' (or any of its ancestors). */
194 static void* operator new(size_t size, void *ctx)
195 {
196 void *node;
197
198 node = rzalloc_size(ctx, size);
199 assert(node != NULL);
200
201 return node;
202 }
203
204 unsigned op;
205 st_dst_reg dst;
206 st_src_reg src[3];
207 /** Pointer to the ir source this tree came from for debugging */
208 ir_instruction *ir;
209 GLboolean cond_update;
210 bool saturate;
211 int sampler; /**< sampler index */
212 int tex_target; /**< One of TEXTURE_*_INDEX */
213 GLboolean tex_shadow;
214 int dead_mask; /**< Used in dead code elimination */
215
216 class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */
217 };
218
219 class variable_storage : public exec_node {
220 public:
221 variable_storage(ir_variable *var, gl_register_file file, int index)
222 : file(file), index(index), var(var)
223 {
224 /* empty */
225 }
226
227 gl_register_file file;
228 int index;
229 ir_variable *var; /* variable that maps to this, if any */
230 };
231
232 class immediate_storage : public exec_node {
233 public:
234 immediate_storage(gl_constant_value *values, int size, int type)
235 {
236 memcpy(this->values, values, size * sizeof(gl_constant_value));
237 this->size = size;
238 this->type = type;
239 }
240
241 gl_constant_value values[4];
242 int size; /**< Number of components (1-4) */
243 int type; /**< GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */
244 };
245
246 class function_entry : public exec_node {
247 public:
248 ir_function_signature *sig;
249
250 /**
251 * identifier of this function signature used by the program.
252 *
253 * At the point that TGSI instructions for function calls are
254 * generated, we don't know the address of the first instruction of
255 * the function body. So the BranchTarget of each call is emitted as a
256 * small placeholder integer and rewritten during set_branchtargets().
257 */
258 int sig_id;
259
260 /**
261 * Pointer to first instruction of the function body.
262 *
263 * Set during function body emits after main() is processed.
264 */
265 glsl_to_tgsi_instruction *bgn_inst;
266
267 /**
268 * Index of the first instruction of the function body in actual TGSI.
269 *
270 * Set after conversion from glsl_to_tgsi_instruction to TGSI.
271 */
272 int inst;
273
274 /** Storage for the return value. */
275 st_src_reg return_reg;
276 };
277
278 class glsl_to_tgsi_visitor : public ir_visitor {
279 public:
280 glsl_to_tgsi_visitor();
281 ~glsl_to_tgsi_visitor();
282
283 function_entry *current_function;
284
285 struct gl_context *ctx;
286 struct gl_program *prog;
287 struct gl_shader_program *shader_program;
288 struct gl_shader_compiler_options *options;
289
290 int next_temp;
291
292 int num_address_regs;
293 int samplers_used;
294 bool indirect_addr_temps;
295 bool indirect_addr_consts;
296
297 int glsl_version;
298 bool native_integers;
299
300 variable_storage *find_variable_storage(ir_variable *var);
301
302 int add_constant(gl_register_file file, gl_constant_value values[4],
303 int size, int datatype, GLuint *swizzle_out);
304
305 function_entry *get_function_signature(ir_function_signature *sig);
306
307 st_src_reg get_temp(const glsl_type *type);
308 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);
309
310 st_src_reg st_src_reg_for_float(float val);
311 st_src_reg st_src_reg_for_int(int val);
312 st_src_reg st_src_reg_for_type(int type, int val);
313
314 /**
315 * \name Visit methods
316 *
317 * As typical for the visitor pattern, there must be one \c visit method for
318 * each concrete subclass of \c ir_instruction. Virtual base classes within
319 * the hierarchy should not have \c visit methods.
320 */
321 /*@{*/
322 virtual void visit(ir_variable *);
323 virtual void visit(ir_loop *);
324 virtual void visit(ir_loop_jump *);
325 virtual void visit(ir_function_signature *);
326 virtual void visit(ir_function *);
327 virtual void visit(ir_expression *);
328 virtual void visit(ir_swizzle *);
329 virtual void visit(ir_dereference_variable *);
330 virtual void visit(ir_dereference_array *);
331 virtual void visit(ir_dereference_record *);
332 virtual void visit(ir_assignment *);
333 virtual void visit(ir_constant *);
334 virtual void visit(ir_call *);
335 virtual void visit(ir_return *);
336 virtual void visit(ir_discard *);
337 virtual void visit(ir_texture *);
338 virtual void visit(ir_if *);
339 /*@}*/
340
341 st_src_reg result;
342
343 /** List of variable_storage */
344 exec_list variables;
345
346 /** List of immediate_storage */
347 exec_list immediates;
348 int num_immediates;
349
350 /** List of function_entry */
351 exec_list function_signatures;
352 int next_signature_id;
353
354 /** List of glsl_to_tgsi_instruction */
355 exec_list instructions;
356
357 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op);
358
359 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
360 st_dst_reg dst, st_src_reg src0);
361
362 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
363 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
364
365 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op,
366 st_dst_reg dst,
367 st_src_reg src0, st_src_reg src1, st_src_reg src2);
368
369 unsigned get_opcode(ir_instruction *ir, unsigned op,
370 st_dst_reg dst,
371 st_src_reg src0, st_src_reg src1);
372
373 /**
374 * Emit the correct dot-product instruction for the type of arguments
375 */
376 void emit_dp(ir_instruction *ir,
377 st_dst_reg dst,
378 st_src_reg src0,
379 st_src_reg src1,
380 unsigned elements);
381
382 void emit_scalar(ir_instruction *ir, unsigned op,
383 st_dst_reg dst, st_src_reg src0);
384
385 void emit_scalar(ir_instruction *ir, unsigned op,
386 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
387
388 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);
389
390 void emit_scs(ir_instruction *ir, unsigned op,
391 st_dst_reg dst, const st_src_reg &src);
392
393 GLboolean try_emit_mad(ir_expression *ir,
394 int mul_operand);
395 GLboolean try_emit_sat(ir_expression *ir);
396
397 void emit_swz(ir_expression *ir);
398
399 bool process_move_condition(ir_rvalue *ir);
400
401 void remove_output_reads(gl_register_file type);
402 void simplify_cmp(void);
403
404 void rename_temp_register(int index, int new_index);
405 int get_first_temp_read(int index);
406 int get_first_temp_write(int index);
407 int get_last_temp_read(int index);
408 int get_last_temp_write(int index);
409
410 void copy_propagate(void);
411 void eliminate_dead_code(void);
412 int eliminate_dead_code_advanced(void);
413 void merge_registers(void);
414 void renumber_registers(void);
415
416 void *mem_ctx;
417 };
418
419 static st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
420
421 static st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
422
423 static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT);
424
425 static void
426 fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);
427
428 static void
429 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
430 {
431 va_list args;
432 va_start(args, fmt);
433 ralloc_vasprintf_append(&prog->InfoLog, fmt, args);
434 va_end(args);
435
436 prog->LinkStatus = GL_FALSE;
437 }
438
439 static int
440 swizzle_for_size(int size)
441 {
442 int size_swizzles[4] = {
443 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
444 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
445 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
446 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
447 };
448
449 assert((size >= 1) && (size <= 4));
450 return size_swizzles[size - 1];
451 }
452
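/* For example, a vec2 gets its last real channel replicated into the unused
 * slots, so reads of .z/.w stay well defined:
 *
 *    swizzle_for_size(2) == MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y)
 */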
453 static bool
454 is_tex_instruction(unsigned opcode)
455 {
456 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
457 return info->is_tex;
458 }
459
460 static unsigned
461 num_inst_dst_regs(unsigned opcode)
462 {
463 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
464 return info->num_dst;
465 }
466
467 static unsigned
468 num_inst_src_regs(unsigned opcode)
469 {
470 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode);
471 return info->is_tex ? info->num_src - 1 : info->num_src;
472 }
473
474 glsl_to_tgsi_instruction *
475 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
476 st_dst_reg dst,
477 st_src_reg src0, st_src_reg src1, st_src_reg src2)
478 {
479 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
480 int num_reladdr = 0, i;
481
482 op = get_opcode(ir, op, dst, src0, src1);
483
484 /* If we have to do relative addressing, we want to load the ARL
485 * reg directly for one of the regs, and preload the other reladdr
486 * sources into temps.
487 */
488 num_reladdr += dst.reladdr != NULL;
489 num_reladdr += src0.reladdr != NULL;
490 num_reladdr += src1.reladdr != NULL;
491 num_reladdr += src2.reladdr != NULL;
492
493 reladdr_to_temp(ir, &src2, &num_reladdr);
494 reladdr_to_temp(ir, &src1, &num_reladdr);
495 reladdr_to_temp(ir, &src0, &num_reladdr);
496
497 if (dst.reladdr) {
498 emit_arl(ir, address_reg, *dst.reladdr);
499 num_reladdr--;
500 }
501 assert(num_reladdr == 0);
502
503 inst->op = op;
504 inst->dst = dst;
505 inst->src[0] = src0;
506 inst->src[1] = src1;
507 inst->src[2] = src2;
508 inst->ir = ir;
509 inst->dead_mask = 0;
510
511 inst->function = NULL;
512
513 if (op == TGSI_OPCODE_ARL)
514 this->num_address_regs = 1;
515
516 /* Update indirect addressing status used by TGSI */
517 if (dst.reladdr) {
518 switch(dst.file) {
519 case PROGRAM_TEMPORARY:
520 this->indirect_addr_temps = true;
521 break;
522 case PROGRAM_LOCAL_PARAM:
523 case PROGRAM_ENV_PARAM:
524 case PROGRAM_STATE_VAR:
525 case PROGRAM_NAMED_PARAM:
526 case PROGRAM_CONSTANT:
527 case PROGRAM_UNIFORM:
528 this->indirect_addr_consts = true;
529 break;
530 case PROGRAM_IMMEDIATE:
531 assert(!"immediates should not have indirect addressing");
532 break;
533 default:
534 break;
535 }
536 }
537 else {
538 for (i=0; i<3; i++) {
539 if(inst->src[i].reladdr) {
540 switch(inst->src[i].file) {
541 case PROGRAM_TEMPORARY:
542 this->indirect_addr_temps = true;
543 break;
544 case PROGRAM_LOCAL_PARAM:
545 case PROGRAM_ENV_PARAM:
546 case PROGRAM_STATE_VAR:
547 case PROGRAM_NAMED_PARAM:
548 case PROGRAM_CONSTANT:
549 case PROGRAM_UNIFORM:
550 this->indirect_addr_consts = true;
551 break;
552 case PROGRAM_IMMEDIATE:
553 assert(!"immediates should not have indirect addressing");
554 break;
555 default:
556 break;
557 }
558 }
559 }
560 }
561
562 this->instructions.push_tail(inst);
563
564 return inst;
565 }
566
567
568 glsl_to_tgsi_instruction *
569 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
570 st_dst_reg dst, st_src_reg src0, st_src_reg src1)
571 {
572 return emit(ir, op, dst, src0, src1, undef_src);
573 }
574
575 glsl_to_tgsi_instruction *
576 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op,
577 st_dst_reg dst, st_src_reg src0)
578 {
579 assert(dst.writemask != 0);
580 return emit(ir, op, dst, src0, undef_src, undef_src);
581 }
582
583 glsl_to_tgsi_instruction *
584 glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op)
585 {
586 return emit(ir, op, undef_dst, undef_src, undef_src, undef_src);
587 }
588
589 /**
590 * Determines whether to use an integer, unsigned integer, or float opcode
591 * based on the operands and the input opcode, and returns the opcode to use.
592 *
593 * TODO: type checking for remaining TGSI opcodes
594 */
595 unsigned
596 glsl_to_tgsi_visitor::get_opcode(ir_instruction *ir, unsigned op,
597 st_dst_reg dst,
598 st_src_reg src0, st_src_reg src1)
599 {
600 int type = GLSL_TYPE_FLOAT;
601
602 if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
603 type = GLSL_TYPE_FLOAT;
604 else if (native_integers)
605 type = src0.type;
606
607 #define case4(c, f, i, u) \
608 case TGSI_OPCODE_##c: \
609 if (type == GLSL_TYPE_INT) op = TGSI_OPCODE_##i; \
610 else if (type == GLSL_TYPE_UINT) op = TGSI_OPCODE_##u; \
611 else op = TGSI_OPCODE_##f; \
612 break;
613 #define case3(f, i, u) case4(f, f, i, u)
614 #define case2fi(f, i) case4(f, f, i, i)
615 #define case2iu(i, u) case4(i, LAST, i, u)
616
617 switch(op) {
618 case2fi(ADD, UADD);
619 case2fi(MUL, UMUL);
620 case2fi(MAD, UMAD);
621 case3(DIV, IDIV, UDIV);
622 case3(MAX, IMAX, UMAX);
623 case3(MIN, IMIN, UMIN);
624 case2iu(MOD, UMOD);
625
626 case2fi(SEQ, USEQ);
627 case2fi(SNE, USNE);
628 case3(SGE, ISGE, USGE);
629 case3(SLT, ISLT, USLT);
630
631 case2iu(SHL, SHL);
632 case2iu(ISHR, USHR);
633 case2iu(NOT, NOT);
634 case2iu(AND, AND);
635 case2iu(OR, OR);
636 case2iu(XOR, XOR);
637
638 default: break;
639 }
640
641 assert(op != TGSI_OPCODE_LAST);
642 return op;
643 }
644
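/* For example, with native_integers set and both sources typed GLSL_TYPE_INT,
 * case2fi(ADD, UADD) above turns TGSI_OPCODE_ADD into TGSI_OPCODE_UADD; if
 * either source is GLSL_TYPE_FLOAT the float opcode is kept:
 *
 *    get_opcode(ir, TGSI_OPCODE_ADD, dst, int_src,   int_src)  -> TGSI_OPCODE_UADD
 *    get_opcode(ir, TGSI_OPCODE_ADD, dst, float_src, int_src)  -> TGSI_OPCODE_ADD
 *
 * (int_src/float_src stand for any st_src_reg of that base type.)
 */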
645 void
646 glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
647 st_dst_reg dst, st_src_reg src0, st_src_reg src1,
648 unsigned elements)
649 {
650 static const unsigned dot_opcodes[] = {
651 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
652 };
653
654 emit(ir, dot_opcodes[elements - 2], dst, src0, src1);
655 }
656
657 /**
658 * Emits TGSI scalar opcodes to produce unique answers across channels.
659 *
660 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X
661 * channel determines the result across all channels. So to do a vec4
662 * of this operation, we want to emit a scalar per source channel used
663 * to produce dest channels.
664 */
665 void
666 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
667 st_dst_reg dst,
668 st_src_reg orig_src0, st_src_reg orig_src1)
669 {
670 int i, j;
671 int done_mask = ~dst.writemask;
672
673 /* TGSI RCP is a scalar operation splatting results to all channels,
674 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
675 * dst channels.
676 */
677 for (i = 0; i < 4; i++) {
678 GLuint this_mask = (1 << i);
679 glsl_to_tgsi_instruction *inst;
680 st_src_reg src0 = orig_src0;
681 st_src_reg src1 = orig_src1;
682
683 if (done_mask & this_mask)
684 continue;
685
686 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
687 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
688 for (j = i + 1; j < 4; j++) {
689 /* If there is another enabled component in the destination that is
690 * derived from the same inputs, generate its value on this pass as
691 * well.
692 */
693 if (!(done_mask & (1 << j)) &&
694 GET_SWZ(src0.swizzle, j) == src0_swiz &&
695 GET_SWZ(src1.swizzle, j) == src1_swiz) {
696 this_mask |= (1 << j);
697 }
698 }
699 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
700 src0_swiz, src0_swiz);
701 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
702 src1_swiz, src1_swiz);
703
704 inst = emit(ir, op, dst, src0, src1);
705 inst->dst.writemask = this_mask;
706 done_mask |= this_mask;
707 }
708 }
709
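/* For example, if dst.writemask is WRITEMASK_XY and src0 is swizzled .xxxx,
 * both destination channels derive from the same source channel, so the inner
 * loop above merges them and a single scalar instruction with writemask XY is
 * emitted instead of two.
 */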
710 void
711 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op,
712 st_dst_reg dst, st_src_reg src0)
713 {
714 st_src_reg undef = undef_src;
715
716 undef.swizzle = SWIZZLE_XXXX;
717
718 emit_scalar(ir, op, dst, src0, undef);
719 }
720
721 void
722 glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
723 st_dst_reg dst, st_src_reg src0)
724 {
725 st_src_reg tmp = get_temp(glsl_type::float_type);
726
727 if (src0.type == GLSL_TYPE_INT)
728 emit(NULL, TGSI_OPCODE_I2F, st_dst_reg(tmp), src0);
729 else if (src0.type == GLSL_TYPE_UINT)
730 emit(NULL, TGSI_OPCODE_U2F, st_dst_reg(tmp), src0);
731 else
732 tmp = src0;
733
734 emit(NULL, TGSI_OPCODE_ARL, dst, tmp);
735 }
736
737 /**
738 * Emit a TGSI_OPCODE_SCS instruction
739 *
740 * The \c SCS opcode functions a bit differently than the other TGSI opcodes.
741 * Instead of splatting its result across all four components of the
742 * destination, it writes one value to the \c x component and another value to
743 * the \c y component.
744 *
745 * \param ir IR instruction being processed
746 * \param op Either \c TGSI_OPCODE_SIN or \c TGSI_OPCODE_COS depending
747 * on which value is desired.
748 * \param dst Destination register
749 * \param src Source register
750 */
751 void
752 glsl_to_tgsi_visitor::emit_scs(ir_instruction *ir, unsigned op,
753 st_dst_reg dst,
754 const st_src_reg &src)
755 {
756 /* Vertex programs cannot use the SCS opcode.
757 */
758 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) {
759 emit_scalar(ir, op, dst, src);
760 return;
761 }
762
763 const unsigned component = (op == TGSI_OPCODE_SIN) ? 0 : 1;
764 const unsigned scs_mask = (1U << component);
765 int done_mask = ~dst.writemask;
766 st_src_reg tmp;
767
768 assert(op == TGSI_OPCODE_SIN || op == TGSI_OPCODE_COS);
769
770 /* If there are components in the destination that differ from the component
771 * that will be written by the SCS instruction, we'll need a temporary.
772 */
773 if (scs_mask != unsigned(dst.writemask)) {
774 tmp = get_temp(glsl_type::vec4_type);
775 }
776
777 for (unsigned i = 0; i < 4; i++) {
778 unsigned this_mask = (1U << i);
779 st_src_reg src0 = src;
780
781 if ((done_mask & this_mask) != 0)
782 continue;
783
784 /* The source swizzle specifies which component of the source generates
785 * sine / cosine for the current component in the destination. The SCS
786 * instruction requires that this value be swizzled to the X component.
787 * Replace the current swizzle with a swizzle that puts the source in
788 * the X component.
789 */
790 unsigned src0_swiz = GET_SWZ(src.swizzle, i);
791
792 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
793 src0_swiz, src0_swiz);
794 for (unsigned j = i + 1; j < 4; j++) {
795 /* If there is another enabled component in the destination that is
796 * derived from the same inputs, generate its value on this pass as
797 * well.
798 */
799 if (!(done_mask & (1 << j)) &&
800 GET_SWZ(src0.swizzle, j) == src0_swiz) {
801 this_mask |= (1 << j);
802 }
803 }
804
805 if (this_mask != scs_mask) {
806 glsl_to_tgsi_instruction *inst;
807 st_dst_reg tmp_dst = st_dst_reg(tmp);
808
809 /* Emit the SCS instruction.
810 */
811 inst = emit(ir, TGSI_OPCODE_SCS, tmp_dst, src0);
812 inst->dst.writemask = scs_mask;
813
814 /* Move the result of the SCS instruction to the desired location in
815 * the destination.
816 */
817 tmp.swizzle = MAKE_SWIZZLE4(component, component,
818 component, component);
819 inst = emit(ir, TGSI_OPCODE_SCS, dst, tmp);
820 inst->dst.writemask = this_mask;
821 } else {
822 /* Emit the SCS instruction to write directly to the destination.
823 */
824 glsl_to_tgsi_instruction *inst = emit(ir, TGSI_OPCODE_SCS, dst, src0);
825 inst->dst.writemask = scs_mask;
826 }
827
828 done_mask |= this_mask;
829 }
830 }
831
832 int
833 glsl_to_tgsi_visitor::add_constant(gl_register_file file,
834 gl_constant_value values[4], int size, int datatype,
835 GLuint *swizzle_out)
836 {
837 if (file == PROGRAM_CONSTANT) {
838 return _mesa_add_typed_unnamed_constant(this->prog->Parameters, values,
839 size, datatype, swizzle_out);
840 } else {
841 int index = 0;
842 immediate_storage *entry;
843 assert(file == PROGRAM_IMMEDIATE);
844
845 /* Search immediate storage to see if we already have an identical
846 * immediate that we can use instead of adding a duplicate entry.
847 */
848 foreach_iter(exec_list_iterator, iter, this->immediates) {
849 entry = (immediate_storage *)iter.get();
850
851 if (entry->size == size &&
852 entry->type == datatype &&
853 !memcmp(entry->values, values, size * sizeof(gl_constant_value))) {
854 return index;
855 }
856 index++;
857 }
858
859 /* Add this immediate to the list. */
860 entry = new(mem_ctx) immediate_storage(values, size, datatype);
861 this->immediates.push_tail(entry);
862 this->num_immediates++;
863 return index;
864 }
865 }
866
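/* Note that the linear scan above deduplicates immediates: e.g. two separate
 * st_src_reg_for_float(1.0) calls resolve to the same PROGRAM_IMMEDIATE index
 * rather than growing the immediate list.
 */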
867 struct st_src_reg
868 glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
869 {
870 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT);
871 union gl_constant_value uval;
872
873 uval.f = val;
874 src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle);
875
876 return src;
877 }
878
879 struct st_src_reg
880 glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
881 {
882 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT);
883 union gl_constant_value uval;
884
885 assert(native_integers);
886
887 uval.i = val;
888 src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle);
889
890 return src;
891 }
892
893 struct st_src_reg
894 glsl_to_tgsi_visitor::st_src_reg_for_type(int type, int val)
895 {
896 if (native_integers)
897 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
898 st_src_reg_for_int(val);
899 else
900 return st_src_reg_for_float(val);
901 }
902
903 static int
904 type_size(const struct glsl_type *type)
905 {
906 unsigned int i;
907 int size;
908
909 switch (type->base_type) {
910 case GLSL_TYPE_UINT:
911 case GLSL_TYPE_INT:
912 case GLSL_TYPE_FLOAT:
913 case GLSL_TYPE_BOOL:
914 if (type->is_matrix()) {
915 return type->matrix_columns;
916 } else {
917 /* Regardless of the vector's size, it gets a whole vec4. This is bad
918 * packing for things like floats, but otherwise arrays become a
919 * mess. Hopefully a later pass over the code can pack scalars
920 * down if appropriate.
921 */
922 return 1;
923 }
924 case GLSL_TYPE_ARRAY:
925 assert(type->length > 0);
926 return type_size(type->fields.array) * type->length;
927 case GLSL_TYPE_STRUCT:
928 size = 0;
929 for (i = 0; i < type->length; i++) {
930 size += type_size(type->fields.structure[i].type);
931 }
932 return size;
933 case GLSL_TYPE_SAMPLER:
934 /* Samplers take up one slot in UNIFORMS[], but they're baked in
935 * at link time.
936 */
937 return 1;
938 default:
939 assert(0);
940 return 0;
941 }
942 }
943
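/* Example sizes from the function above: float, vec3 and vec4 each take one
 * vec4 slot (1), mat3 takes 3 (one per column), float[10] takes 10, and a
 * struct { vec3 a; mat4 b; } takes 1 + 4 = 5.
 */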
944 /**
945 * In the initial pass of codegen, we assign temporary numbers to
946 * intermediate results. (not SSA -- variable assignments will reuse
947 * storage).
948 */
949 st_src_reg
950 glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
951 {
952 st_src_reg src;
953
954 src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT;
955 src.file = PROGRAM_TEMPORARY;
956 src.index = next_temp;
957 src.reladdr = NULL;
958 next_temp += type_size(type);
959
960 if (type->is_array() || type->is_record()) {
961 src.swizzle = SWIZZLE_NOOP;
962 } else {
963 src.swizzle = swizzle_for_size(type->vector_elements);
964 }
965 src.negate = 0;
966
967 return src;
968 }
969
970 variable_storage *
971 glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
972 {
973
974 variable_storage *entry;
975
976 foreach_iter(exec_list_iterator, iter, this->variables) {
977 entry = (variable_storage *)iter.get();
978
979 if (entry->var == var)
980 return entry;
981 }
982
983 return NULL;
984 }
985
986 void
987 glsl_to_tgsi_visitor::visit(ir_variable *ir)
988 {
989 if (strcmp(ir->name, "gl_FragCoord") == 0) {
990 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
991
992 fp->OriginUpperLeft = ir->origin_upper_left;
993 fp->PixelCenterInteger = ir->pixel_center_integer;
994
995 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
996 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
997 switch (ir->depth_layout) {
998 case ir_depth_layout_none:
999 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
1000 break;
1001 case ir_depth_layout_any:
1002 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
1003 break;
1004 case ir_depth_layout_greater:
1005 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
1006 break;
1007 case ir_depth_layout_less:
1008 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
1009 break;
1010 case ir_depth_layout_unchanged:
1011 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
1012 break;
1013 default:
1014 assert(0);
1015 break;
1016 }
1017 }
1018
1019 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
1020 unsigned int i;
1021 const ir_state_slot *const slots = ir->state_slots;
1022 assert(ir->state_slots != NULL);
1023
1024 /* Check if this statevar's setup in the STATE file exactly
1025 * matches how we'll want to reference it as a
1026 * struct/array/whatever. If not, then we need to move it into
1027 * temporary storage and hope that it'll get copy-propagated
1028 * out.
1029 */
1030 for (i = 0; i < ir->num_state_slots; i++) {
1031 if (slots[i].swizzle != SWIZZLE_XYZW) {
1032 break;
1033 }
1034 }
1035
1036 struct variable_storage *storage;
1037 st_dst_reg dst;
1038 if (i == ir->num_state_slots) {
1039 /* We'll set the index later. */
1040 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
1041 this->variables.push_tail(storage);
1042
1043 dst = undef_dst;
1044 } else {
1045 /* The variable_storage constructor allocates slots based on the size
1046 * of the type. However, this had better match the number of state
1047 * elements that we're going to copy into the new temporary.
1048 */
1049 assert((int) ir->num_state_slots == type_size(ir->type));
1050
1051 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY,
1052 this->next_temp);
1053 this->variables.push_tail(storage);
1054 this->next_temp += type_size(ir->type);
1055
1056 dst = st_dst_reg(st_src_reg(PROGRAM_TEMPORARY, storage->index,
1057 native_integers ? ir->type->base_type : GLSL_TYPE_FLOAT));
1058 }
1059
1060
1061 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
1062 int index = _mesa_add_state_reference(this->prog->Parameters,
1063 (gl_state_index *)slots[i].tokens);
1064
1065 if (storage->file == PROGRAM_STATE_VAR) {
1066 if (storage->index == -1) {
1067 storage->index = index;
1068 } else {
1069 assert(index == storage->index + (int)i);
1070 }
1071 } else {
1072 st_src_reg src(PROGRAM_STATE_VAR, index,
1073 native_integers ? ir->type->base_type : GLSL_TYPE_FLOAT);
1074 src.swizzle = slots[i].swizzle;
1075 emit(ir, TGSI_OPCODE_MOV, dst, src);
1076 /* even a float takes up a whole vec4 reg in a struct/array. */
1077 dst.index++;
1078 }
1079 }
1080
1081 if (storage->file == PROGRAM_TEMPORARY &&
1082 dst.index != storage->index + (int) ir->num_state_slots) {
1083 fail_link(this->shader_program,
1084 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
1085 ir->name, dst.index - storage->index,
1086 type_size(ir->type));
1087 }
1088 }
1089 }
1090
1091 void
1092 glsl_to_tgsi_visitor::visit(ir_loop *ir)
1093 {
1094 ir_dereference_variable *counter = NULL;
1095
1096 if (ir->counter != NULL)
1097 counter = new(ir) ir_dereference_variable(ir->counter);
1098
1099 if (ir->from != NULL) {
1100 assert(ir->counter != NULL);
1101
1102 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL);
1103
1104 a->accept(this);
1105 delete a;
1106 }
1107
1108 emit(NULL, TGSI_OPCODE_BGNLOOP);
1109
1110 if (ir->to) {
1111 ir_expression *e =
1112 new(ir) ir_expression(ir->cmp, glsl_type::bool_type,
1113 counter, ir->to);
1114 ir_if *if_stmt = new(ir) ir_if(e);
1115
1116 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
1117
1118 if_stmt->then_instructions.push_tail(brk);
1119
1120 if_stmt->accept(this);
1121
1122 delete if_stmt;
1123 delete e;
1124 delete brk;
1125 }
1126
1127 visit_exec_list(&ir->body_instructions, this);
1128
1129 if (ir->increment) {
1130 ir_expression *e =
1131 new(ir) ir_expression(ir_binop_add, counter->type,
1132 counter, ir->increment);
1133
1134 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL);
1135
1136 a->accept(this);
1137 delete a;
1138 delete e;
1139 }
1140
1141 emit(NULL, TGSI_OPCODE_ENDLOOP);
1142 }
1143
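/* The emitted sequence for a bounded loop therefore looks roughly like:
 *
 *    <counter = ir->from>                  (the ir_assignment above)
 *    BGNLOOP
 *       if (counter ir->cmp ir->to) BRK    (the synthesized ir_if)
 *       <body instructions>
 *       counter = counter + ir->increment
 *    ENDLOOP
 *
 * The exact IF/comparison opcodes come from visit(ir_if) and visit(ir_expression).
 */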
1144 void
1145 glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
1146 {
1147 switch (ir->mode) {
1148 case ir_loop_jump::jump_break:
1149 emit(NULL, TGSI_OPCODE_BRK);
1150 break;
1151 case ir_loop_jump::jump_continue:
1152 emit(NULL, TGSI_OPCODE_CONT);
1153 break;
1154 }
1155 }
1156
1157
1158 void
1159 glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
1160 {
1161 assert(0);
1162 (void)ir;
1163 }
1164
1165 void
1166 glsl_to_tgsi_visitor::visit(ir_function *ir)
1167 {
1168 /* Ignore function bodies other than main() -- we shouldn't see calls to
1169 * them since they should all be inlined before we get to glsl_to_tgsi.
1170 */
1171 if (strcmp(ir->name, "main") == 0) {
1172 const ir_function_signature *sig;
1173 exec_list empty;
1174
1175 sig = ir->matching_signature(&empty);
1176
1177 assert(sig);
1178
1179 foreach_iter(exec_list_iterator, iter, sig->body) {
1180 ir_instruction *ir = (ir_instruction *)iter.get();
1181
1182 ir->accept(this);
1183 }
1184 }
1185 }
1186
1187 GLboolean
1188 glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
1189 {
1190 int nonmul_operand = 1 - mul_operand;
1191 st_src_reg a, b, c;
1192 st_dst_reg result_dst;
1193
1194 ir_expression *expr = ir->operands[mul_operand]->as_expression();
1195 if (!expr || expr->operation != ir_binop_mul)
1196 return false;
1197
1198 expr->operands[0]->accept(this);
1199 a = this->result;
1200 expr->operands[1]->accept(this);
1201 b = this->result;
1202 ir->operands[nonmul_operand]->accept(this);
1203 c = this->result;
1204
1205 this->result = get_temp(ir->type);
1206 result_dst = st_dst_reg(this->result);
1207 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1208 emit(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);
1209
1210 return true;
1211 }
1212
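/* For example, the GLSL statement "d = a * b + c;" reaches the visitor as
 * ir_binop_add(ir_binop_mul(a, b), c); try_emit_mad(ir, 0) matches the
 * multiply in operand 0 and emits a single TGSI_OPCODE_MAD into a fresh
 * temporary instead of a MUL followed by an ADD.
 */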
1213 GLboolean
1214 glsl_to_tgsi_visitor::try_emit_sat(ir_expression *ir)
1215 {
1216 /* Saturates were only introduced to vertex programs in
1217 * NV_vertex_program3, so don't give them to drivers in the VP.
1218 */
1219 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
1220 return false;
1221
1222 ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
1223 if (!sat_src)
1224 return false;
1225
1226 sat_src->accept(this);
1227 st_src_reg src = this->result;
1228
1229 /* If we generated an expression instruction into a temporary in
1230 * processing the saturate's operand, apply the saturate to that
1231 * instruction. Otherwise, generate a MOV to do the saturate.
1232 *
1233 * Note that we have to be careful to only do this optimization if
1234 * the instruction in question was what generated src->result. For
1235 * example, ir_dereference_array might generate a MUL instruction
1236 * to create the reladdr, and return us a src reg using that
1237 * reladdr. That MUL result is not the value we're trying to
1238 * saturate.
1239 */
1240 ir_expression *sat_src_expr = sat_src->as_expression();
1241 if (sat_src_expr && (sat_src_expr->operation == ir_binop_mul ||
1242 sat_src_expr->operation == ir_binop_add ||
1243 sat_src_expr->operation == ir_binop_dot)) {
1244 glsl_to_tgsi_instruction *new_inst;
1245 new_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
1246 new_inst->saturate = true;
1247 } else {
1248 this->result = get_temp(ir->type);
1249 st_dst_reg result_dst = st_dst_reg(this->result);
1250 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1251 glsl_to_tgsi_instruction *inst;
1252 inst = emit(ir, TGSI_OPCODE_MOV, result_dst, src);
1253 inst->saturate = true;
1254 }
1255
1256 return true;
1257 }
1258
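/* For example, assuming as_rvalue_to_saturate() recognizes the expression as
 * a clamp to [0,1] of "a + b", the saturate is folded onto the ADD that was
 * just emitted for the operand by setting saturate on the tail instruction;
 * for operands such as an array dereference, a saturated MOV into a temporary
 * is emitted instead.
 */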
1259 void
1260 glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
1261 st_src_reg *reg, int *num_reladdr)
1262 {
1263 if (!reg->reladdr)
1264 return;
1265
1266 emit_arl(ir, address_reg, *reg->reladdr);
1267
1268 if (*num_reladdr != 1) {
1269 st_src_reg temp = get_temp(glsl_type::vec4_type);
1270
1271 emit(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
1272 *reg = temp;
1273 }
1274
1275 (*num_reladdr)--;
1276 }
1277
1278 void
1279 glsl_to_tgsi_visitor::visit(ir_expression *ir)
1280 {
1281 unsigned int operand;
1282 st_src_reg op[Elements(ir->operands)];
1283 st_src_reg result_src;
1284 st_dst_reg result_dst;
1285
1286 /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
1287 */
1288 if (ir->operation == ir_binop_add) {
1289 if (try_emit_mad(ir, 1))
1290 return;
1291 if (try_emit_mad(ir, 0))
1292 return;
1293 }
1294 if (try_emit_sat(ir))
1295 return;
1296
1297 if (ir->operation == ir_quadop_vector)
1298 assert(!"ir_quadop_vector should have been lowered");
1299
1300 for (operand = 0; operand < ir->get_num_operands(); operand++) {
1301 this->result.file = PROGRAM_UNDEFINED;
1302 ir->operands[operand]->accept(this);
1303 if (this->result.file == PROGRAM_UNDEFINED) {
1304 ir_print_visitor v;
1305 printf("Failed to get tree for expression operand:\n");
1306 ir->operands[operand]->accept(&v);
1307 exit(1);
1308 }
1309 op[operand] = this->result;
1310
1311 /* Matrix expression operands should have been broken down to vector
1312 * operations already.
1313 */
1314 assert(!ir->operands[operand]->type->is_matrix());
1315 }
1316
1317 int vector_elements = ir->operands[0]->type->vector_elements;
1318 if (ir->operands[1]) {
1319 vector_elements = MAX2(vector_elements,
1320 ir->operands[1]->type->vector_elements);
1321 }
1322
1323 this->result.file = PROGRAM_UNDEFINED;
1324
1325 /* Storage for our result. Ideally for an assignment we'd be using
1326 * the actual storage for the result here, instead.
1327 */
1328 result_src = get_temp(ir->type);
1329 /* convenience for the emit functions below. */
1330 result_dst = st_dst_reg(result_src);
1331 /* Limit writes to the channels that will be used by result_src later.
1332 * This does limit this temp's use as a temporary for multi-instruction
1333 * sequences.
1334 */
1335 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1336
1337 switch (ir->operation) {
1338 case ir_unop_logic_not:
1339 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], st_src_reg_for_type(result_dst.type, 0));
1340 break;
1341 case ir_unop_neg:
1342 assert(result_dst.type == GLSL_TYPE_FLOAT || result_dst.type == GLSL_TYPE_INT);
1343 if (result_dst.type == GLSL_TYPE_INT)
1344 emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
1345 else {
1346 op[0].negate = ~op[0].negate;
1347 result_src = op[0];
1348 }
1349 break;
1350 case ir_unop_abs:
1351 assert(result_dst.type == GLSL_TYPE_FLOAT);
1352 emit(ir, TGSI_OPCODE_ABS, result_dst, op[0]);
1353 break;
1354 case ir_unop_sign:
1355 emit(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
1356 break;
1357 case ir_unop_rcp:
1358 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
1359 break;
1360
1361 case ir_unop_exp2:
1362 emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
1363 break;
1364 case ir_unop_exp:
1365 case ir_unop_log:
1366 assert(!"not reached: should be handled by ir_explog_to_explog2");
1367 break;
1368 case ir_unop_log2:
1369 emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
1370 break;
1371 case ir_unop_sin:
1372 emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1373 break;
1374 case ir_unop_cos:
1375 emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1376 break;
1377 case ir_unop_sin_reduced:
1378 emit_scs(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1379 break;
1380 case ir_unop_cos_reduced:
1381 emit_scs(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1382 break;
1383
1384 case ir_unop_dFdx:
1385 emit(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
1386 break;
1387 case ir_unop_dFdy:
1388 op[0].negate = ~op[0].negate;
1389 emit(ir, TGSI_OPCODE_DDY, result_dst, op[0]);
1390 break;
1391
1392 case ir_unop_noise: {
1393 /* At some point, a motivated person could add a better
1394 * implementation of noise. Currently not even the nvidia
1395 * binary drivers do anything more than this. In any case, the
1396 * place to do this is in the GL state tracker, not the poor
1397 * driver.
1398 */
1399 emit(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
1400 break;
1401 }
1402
1403 case ir_binop_add:
1404 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1405 break;
1406 case ir_binop_sub:
1407 emit(ir, TGSI_OPCODE_SUB, result_dst, op[0], op[1]);
1408 break;
1409
1410 case ir_binop_mul:
1411 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1412 break;
1413 case ir_binop_div:
1414 if (result_dst.type == GLSL_TYPE_FLOAT)
1415 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
1416 else
1417 emit(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
1418 break;
1419 case ir_binop_mod:
1420 if (result_dst.type == GLSL_TYPE_FLOAT)
1421 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1422 else
1423 emit(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
1424 break;
1425
1426 case ir_binop_less:
1427 emit(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
1428 break;
1429 case ir_binop_greater:
1430 emit(ir, TGSI_OPCODE_SGT, result_dst, op[0], op[1]);
1431 break;
1432 case ir_binop_lequal:
1433 emit(ir, TGSI_OPCODE_SLE, result_dst, op[0], op[1]);
1434 break;
1435 case ir_binop_gequal:
1436 emit(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
1437 break;
1438 case ir_binop_equal:
1439 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1440 break;
1441 case ir_binop_nequal:
1442 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1443 break;
1444 case ir_binop_all_equal:
1445 /* "==" operator producing a scalar boolean. */
1446 if (ir->operands[0]->type->is_vector() ||
1447 ir->operands[1]->type->is_vector()) {
1448 st_src_reg temp = get_temp(native_integers ?
1449 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1450 glsl_type::vec4_type);
1451 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1452 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1453 emit_dp(ir, result_dst, temp, temp, vector_elements);
1454 emit(ir, TGSI_OPCODE_SEQ, result_dst, result_src, st_src_reg_for_float(0.0));
1455 } else {
1456 emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1457 }
1458 break;
1459 case ir_binop_any_nequal:
1460 /* "!=" operator producing a scalar boolean. */
1461 if (ir->operands[0]->type->is_vector() ||
1462 ir->operands[1]->type->is_vector()) {
1463 st_src_reg temp = get_temp(native_integers ?
1464 glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
1465 glsl_type::vec4_type);
1466 assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
1467 emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1468 emit_dp(ir, result_dst, temp, temp, vector_elements);
1469 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1470 } else {
1471 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1472 }
1473 break;
1474
1475 case ir_unop_any:
1476 assert(ir->operands[0]->type->is_vector());
1477 emit_dp(ir, result_dst, op[0], op[0],
1478 ir->operands[0]->type->vector_elements);
1479 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1480 break;
1481
1482 case ir_binop_logic_xor:
1483 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1484 break;
1485
1486 case ir_binop_logic_or:
1487 /* This could be a saturated add and skip the SNE. */
1488 emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1489 emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
1490 break;
1491
1492 case ir_binop_logic_and:
1493 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
1494 emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1495 break;
1496
1497 case ir_binop_dot:
1498 assert(ir->operands[0]->type->is_vector());
1499 assert(ir->operands[0]->type == ir->operands[1]->type);
1500 emit_dp(ir, result_dst, op[0], op[1],
1501 ir->operands[0]->type->vector_elements);
1502 break;
1503
1504 case ir_unop_sqrt:
1505 /* sqrt(x) = x * rsq(x). */
1506 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1507 emit(ir, TGSI_OPCODE_MUL, result_dst, result_src, op[0]);
1508 /* For incoming channels <= 0, set the result to 0. */
1509 op[0].negate = ~op[0].negate;
1510 emit(ir, TGSI_OPCODE_CMP, result_dst,
1511 op[0], result_src, st_src_reg_for_float(0.0));
1512 break;
1513 case ir_unop_rsq:
1514 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
1515 break;
1516 case ir_unop_i2f:
1517 case ir_unop_b2f:
1518 if (native_integers) {
1519 emit(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
1520 break;
1521 }
1522 case ir_unop_i2u:
1523 case ir_unop_u2i:
1524 /* Converting between signed and unsigned integers is a no-op. */
1525 case ir_unop_b2i:
1526 /* Booleans are stored as integers (or floats in GLSL 1.20 and lower). */
1527 result_src = op[0];
1528 break;
1529 case ir_unop_f2i:
1530 if (native_integers)
1531 emit(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
1532 else
1533 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1534 break;
1535 case ir_unop_f2b:
1536 case ir_unop_i2b:
1537 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0],
1538 st_src_reg_for_type(result_dst.type, 0));
1539 break;
1540 case ir_unop_trunc:
1541 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
1542 break;
1543 case ir_unop_ceil:
1544 op[0].negate = ~op[0].negate;
1545 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1546 result_src.negate = ~result_src.negate;
1547 break;
1548 case ir_unop_floor:
1549 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
1550 break;
1551 case ir_unop_fract:
1552 emit(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
1553 break;
1554
1555 case ir_binop_min:
1556 emit(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
1557 break;
1558 case ir_binop_max:
1559 emit(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
1560 break;
1561 case ir_binop_pow:
1562 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
1563 break;
1564
1565 case ir_unop_bit_not:
1566 if (glsl_version >= 130) {
1567 emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1568 break;
1569 }
1570 case ir_unop_u2f:
1571 if (native_integers) {
1572 emit(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
1573 break;
1574 }
1575 case ir_binop_lshift:
1576 if (glsl_version >= 130) {
1577 emit(ir, TGSI_OPCODE_SHL, result_dst, op[0], op[1]);
1578 break;
1579 }
1580 case ir_binop_rshift:
1581 if (glsl_version >= 130) {
1582 emit(ir, TGSI_OPCODE_ISHR, result_dst, op[0], op[1]);
1583 break;
1584 }
1585 case ir_binop_bit_and:
1586 if (glsl_version >= 130) {
1587 emit(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
1588 break;
1589 }
1590 case ir_binop_bit_xor:
1591 if (glsl_version >= 130) {
1592 emit(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1593 break;
1594 }
1595 case ir_binop_bit_or:
1596 if (glsl_version >= 130) {
1597 emit(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1598 break;
1599 }
1600 case ir_unop_round_even:
1601 assert(!"GLSL 1.30 features unsupported");
1602 break;
1603
1604 case ir_quadop_vector:
1605 /* This operation should have already been handled.
1606 */
1607 assert(!"Should not get here.");
1608 break;
1609 }
1610
1611 this->result = result_src;
1612 }
1613
1614
1615 void
1616 glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
1617 {
1618 st_src_reg src;
1619 int i;
1620 int swizzle[4];
1621
1622 /* Note that this is only swizzles in expressions, not those on the left
1623 * hand side of an assignment, which do write masking. See ir_assignment
1624 * for that.
1625 */
1626
1627 ir->val->accept(this);
1628 src = this->result;
1629 assert(src.file != PROGRAM_UNDEFINED);
1630
1631 for (i = 0; i < 4; i++) {
1632 if (i < ir->type->vector_elements) {
1633 switch (i) {
1634 case 0:
1635 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
1636 break;
1637 case 1:
1638 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
1639 break;
1640 case 2:
1641 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
1642 break;
1643 case 3:
1644 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
1645 break;
1646 }
1647 } else {
1648 /* If the type is smaller than a vec4, replicate the last
1649 * channel out.
1650 */
1651 swizzle[i] = swizzle[ir->type->vector_elements - 1];
1652 }
1653 }
1654
1655 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1656
1657 this->result = src;
1658 }
1659
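/* For example, if the visited value already carries swizzle .yzwx and this
 * ir_swizzle selects .xy on a vec2 result, the loop above composes them into
 * MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_Z): the two live
 * channels are looked up through GET_SWZ() and the last one is replicated
 * into the unused slots.
 */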
1660 void
1661 glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
1662 {
1663 variable_storage *entry = find_variable_storage(ir->var);
1664 ir_variable *var = ir->var;
1665
1666 if (!entry) {
1667 switch (var->mode) {
1668 case ir_var_uniform:
1669 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
1670 var->location);
1671 this->variables.push_tail(entry);
1672 break;
1673 case ir_var_in:
1674 case ir_var_inout:
1675 /* The linker assigns locations for varyings and attributes,
1676 * including deprecated builtins (like gl_Color), user-assigned
1677 * generic attributes (glBindAttribLocation), and
1678 * user-defined varyings.
1679 *
1680 * FINISHME: We would hit this path for function arguments. Fix!
1681 */
1682 assert(var->location != -1);
1683 entry = new(mem_ctx) variable_storage(var,
1684 PROGRAM_INPUT,
1685 var->location);
1686 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1687 var->location >= VERT_ATTRIB_GENERIC0) {
1688 _mesa_add_attribute(this->prog->Attributes,
1689 var->name,
1690 _mesa_sizeof_glsl_type(var->type->gl_type),
1691 var->type->gl_type,
1692 var->location - VERT_ATTRIB_GENERIC0);
1693 }
1694 break;
1695 case ir_var_out:
1696 assert(var->location != -1);
1697 entry = new(mem_ctx) variable_storage(var,
1698 PROGRAM_OUTPUT,
1699 var->location);
1700 break;
1701 case ir_var_system_value:
1702 entry = new(mem_ctx) variable_storage(var,
1703 PROGRAM_SYSTEM_VALUE,
1704 var->location);
1705 break;
1706 case ir_var_auto:
1707 case ir_var_temporary:
1708 entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY,
1709 this->next_temp);
1710 this->variables.push_tail(entry);
1711
1712 next_temp += type_size(var->type);
1713 break;
1714 }
1715
1716 if (!entry) {
1717 printf("Failed to make storage for %s\n", var->name);
1718 exit(1);
1719 }
1720 }
1721
1722 this->result = st_src_reg(entry->file, entry->index, var->type);
1723 if (!native_integers)
1724 this->result.type = GLSL_TYPE_FLOAT;
1725 }
1726
1727 void
1728 glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
1729 {
1730 ir_constant *index;
1731 st_src_reg src;
1732 int element_size = type_size(ir->type);
1733
1734 index = ir->array_index->constant_expression_value();
1735
1736 ir->array->accept(this);
1737 src = this->result;
1738
1739 if (index) {
1740 src.index += index->value.i[0] * element_size;
1741 } else {
1742 /* Variable index array dereference. It eats the "vec4" of the
1743 * base of the array and an index that offsets the TGSI register
1744 * index.
1745 */
1746 ir->array_index->accept(this);
1747
1748 st_src_reg index_reg;
1749
1750 if (element_size == 1) {
1751 index_reg = this->result;
1752 } else {
1753 index_reg = get_temp(glsl_type::float_type);
1754
1755 emit(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
1756 this->result, st_src_reg_for_float(element_size));
1757 }
1758
1759 /* If there was already a relative address register involved, add the
1760 * new and the old together to get the new offset.
1761 */
1762 if (src.reladdr != NULL) {
1763 st_src_reg accum_reg = get_temp(glsl_type::float_type);
1764
1765 emit(ir, TGSI_OPCODE_ADD, st_dst_reg(accum_reg),
1766 index_reg, *src.reladdr);
1767
1768 index_reg = accum_reg;
1769 }
1770
1771 src.reladdr = ralloc(mem_ctx, st_src_reg);
1772 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
1773 }
1774
1775 /* If the type is smaller than a vec4, replicate the last channel out. */
1776 if (ir->type->is_scalar() || ir->type->is_vector())
1777 src.swizzle = swizzle_for_size(ir->type->vector_elements);
1778 else
1779 src.swizzle = SWIZZLE_NOOP;
1780
1781 this->result = src;
1782 }
1783
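/* For a variable index such as "mat4 m[8]; ... m[i]", type_size() gives an
 * element size of 4, so the code above emits MUL tmp, i, 4.0 and stores tmp
 * as src.reladdr; the actual ARL load is emitted later, when the register is
 * consumed, by emit()/reladdr_to_temp().
 */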
1784 void
1785 glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
1786 {
1787 unsigned int i;
1788 const glsl_type *struct_type = ir->record->type;
1789 int offset = 0;
1790
1791 ir->record->accept(this);
1792
1793 for (i = 0; i < struct_type->length; i++) {
1794 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1795 break;
1796 offset += type_size(struct_type->fields.structure[i].type);
1797 }
1798
1799 /* If the type is smaller than a vec4, replicate the last channel out. */
1800 if (ir->type->is_scalar() || ir->type->is_vector())
1801 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1802 else
1803 this->result.swizzle = SWIZZLE_NOOP;
1804
1805 this->result.index += offset;
1806 }
1807
1808 /**
1809 * We want to be careful in assignment setup to hit the actual storage
1810 * instead of potentially using a temporary like we might with the
1811 * ir_dereference handler.
1812 */
1813 static st_dst_reg
1814 get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v)
1815 {
1816 /* The LHS must be a dereference. If the LHS is a variable indexed array
1817 * access of a vector, it must be separated into a series conditional moves
1818 * before reaching this point (see ir_vec_index_to_cond_assign).
1819 */
1820 assert(ir->as_dereference());
1821 ir_dereference_array *deref_array = ir->as_dereference_array();
1822 if (deref_array) {
1823 assert(!deref_array->array->type->is_vector());
1824 }
1825
1826 /* Use the rvalue deref handler for the most part. We'll ignore
1827 * swizzles in it and write swizzles using writemask, though.
1828 */
1829 ir->accept(v);
1830 return st_dst_reg(v->result);
1831 }
1832
1833 /**
1834 * Process the condition of a conditional assignment
1835 *
1836 * Examines the condition of a conditional assignment to generate the optimal
1837 * first operand of a \c CMP instruction. If the condition is a relational
1838 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
1839 * used as the source for the \c CMP instruction. Otherwise the comparison
1840 * is processed to a boolean result, and the boolean result is used as the
1841 * operand to the CMP instruction.
1842 */
1843 bool
1844 glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
1845 {
1846 ir_rvalue *src_ir = ir;
1847 bool negate = true;
1848 bool switch_order = false;
1849
1850 ir_expression *const expr = ir->as_expression();
1851 if ((expr != NULL) && (expr->get_num_operands() == 2)) {
1852 bool zero_on_left = false;
1853
1854 if (expr->operands[0]->is_zero()) {
1855 src_ir = expr->operands[1];
1856 zero_on_left = true;
1857 } else if (expr->operands[1]->is_zero()) {
1858 src_ir = expr->operands[0];
1859 zero_on_left = false;
1860 }
1861
1862 /* a is - 0 + - 0 +
1863 * (a < 0) T F F ( a < 0) T F F
1864 * (0 < a) F F T (-a < 0) F F T
1865 * (a <= 0) T T F (-a < 0) F F T (swap order of other operands)
1866 * (0 <= a) F T T ( a < 0) T F F (swap order of other operands)
1867 * (a > 0) F F T (-a < 0) F F T
1868 * (0 > a) T F F ( a < 0) T F F
1869 * (a >= 0) F T T ( a < 0) T F F (swap order of other operands)
1870 * (0 >= a) T T F (-a < 0) F F T (swap order of other operands)
1871 *
1872 * Note that exchanging the order of 0 and 'a' in the comparison simply
1873 * means that the value of 'a' should be negated.
1874 */
1875 if (src_ir != ir) {
1876 switch (expr->operation) {
1877 case ir_binop_less:
1878 switch_order = false;
1879 negate = zero_on_left;
1880 break;
1881
1882 case ir_binop_greater:
1883 switch_order = false;
1884 negate = !zero_on_left;
1885 break;
1886
1887 case ir_binop_lequal:
1888 switch_order = true;
1889 negate = !zero_on_left;
1890 break;
1891
1892 case ir_binop_gequal:
1893 switch_order = true;
1894 negate = zero_on_left;
1895 break;
1896
1897 default:
1898 /* This isn't the right kind of comparison after all, so make sure
1899 * the whole condition is visited.
1900 */
1901 src_ir = ir;
1902 break;
1903 }
1904 }
1905 }
1906
1907 src_ir->accept(this);
1908
1909 /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
1910 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
1911 * choose which value TGSI_OPCODE_CMP produces without an extra instruction
1912 * computing the condition.
1913 */
1914 if (negate)
1915 this->result.negate = ~this->result.negate;
1916
1917 return switch_order;
1918 }
1919
1920 void
1921 glsl_to_tgsi_visitor::visit(ir_assignment *ir)
1922 {
1923 st_dst_reg l;
1924 st_src_reg r;
1925 int i;
1926
1927 ir->rhs->accept(this);
1928 r = this->result;
1929
1930 l = get_assignment_lhs(ir->lhs, this);
1931
1932 /* FINISHME: This should really be set to the correct maximal writemask for each
1933 * FINISHME: component written (in the loops below). This case can only
1934 * FINISHME: occur for matrices, arrays, and structures.
1935 */
1936 if (ir->write_mask == 0) {
1937 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
1938 l.writemask = WRITEMASK_XYZW;
1939 } else if (ir->lhs->type->is_scalar() &&
1940 ir->lhs->variable_referenced()->mode == ir_var_out) {
1941 /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
1942 * FINISHME: W component of fragment shader output zero, work correctly.
1943 */
1944 l.writemask = WRITEMASK_XYZW;
1945 } else {
1946 int swizzles[4];
1947 int first_enabled_chan = 0;
1948 int rhs_chan = 0;
1949
1950 l.writemask = ir->write_mask;
1951
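      /* Find which channel of the RHS swizzle feeds the first enabled channel
       * of the writemask; it is used below to fill the unwritten channels.
       */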
1952 for (int i = 0; i < 4; i++) {
1953 if (l.writemask & (1 << i)) {
1954 first_enabled_chan = GET_SWZ(r.swizzle, i);
1955 break;
1956 }
1957 }
1958
1959 /* Swizzle a small RHS vector into the channels being written.
1960 *
1961 * glsl ir treats write_mask as dictating how many channels are
1962 * present on the RHS while TGSI treats write_mask as just
1963 * showing which channels of the vec4 RHS get written.
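 *
 * For example, a vec2 RHS (swizzle .xyyy) assigned through writemask .yz
 * gets remapped to swizzle .yxyy, so the RHS x feeds the LHS y channel and
 * the RHS y feeds the LHS z channel.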
1964 */
1965 for (int i = 0; i < 4; i++) {
1966 if (l.writemask & (1 << i))
1967 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
1968 else
1969 swizzles[i] = first_enabled_chan;
1970 }
1971 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
1972 swizzles[2], swizzles[3]);
1973 }
1974
1975 assert(l.file != PROGRAM_UNDEFINED);
1976 assert(r.file != PROGRAM_UNDEFINED);
1977
1978 if (ir->condition) {
1979 const bool switch_order = this->process_move_condition(ir->condition);
1980 st_src_reg condition = this->result;
1981
1982 for (i = 0; i < type_size(ir->lhs->type); i++) {
1983 st_src_reg l_src = st_src_reg(l);
1984 l_src.swizzle = swizzle_for_size(ir->lhs->type->vector_elements);
1985
1986 if (switch_order) {
1987 emit(ir, TGSI_OPCODE_CMP, l, condition, l_src, r);
1988 } else {
1989 emit(ir, TGSI_OPCODE_CMP, l, condition, r, l_src);
1990 }
1991
1992 l.index++;
1993 r.index++;
1994 }
1995 } else if (ir->rhs->as_expression() &&
1996 this->instructions.get_tail() &&
1997 ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir &&
1998 type_size(ir->lhs->type) == 1 &&
1999 l.writemask == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->dst.writemask) {
2000 /* To avoid emitting an extra MOV when assigning an expression to a
2001 * variable, emit the last instruction of the expression again, but
2002 * replace the destination register with the target of the assignment.
2003 * Dead code elimination will remove the original instruction.
2004 */
2005 glsl_to_tgsi_instruction *inst, *new_inst;
2006 inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2007 new_inst = emit(ir, inst->op, l, inst->src[0], inst->src[1], inst->src[2]);
2008 new_inst->saturate = inst->saturate;
2009 } else {
2010 for (i = 0; i < type_size(ir->lhs->type); i++) {
2011 emit(ir, TGSI_OPCODE_MOV, l, r);
2012 l.index++;
2013 r.index++;
2014 }
2015 }
2016 }
2017
2018
2019 void
2020 glsl_to_tgsi_visitor::visit(ir_constant *ir)
2021 {
2022 st_src_reg src;
2023 GLfloat stack_vals[4] = { 0 };
2024 gl_constant_value *values = (gl_constant_value *) stack_vals;
2025 GLenum gl_type = GL_NONE;
2026 unsigned int i;
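   /* Constants that are part of an array constant are stored in the constant
    * file (PROGRAM_CONSTANT) rather than as immediates, presumably so the
    * array elements end up in consecutive, indirectly addressable slots;
    * in_array tracks how deeply nested in array constants we currently are.
    */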
2027 static int in_array = 0;
2028 gl_register_file file = in_array ? PROGRAM_CONSTANT : PROGRAM_IMMEDIATE;
2029
2030 /* Unfortunately, 4 floats is all we can get into
2031 * _mesa_add_typed_unnamed_constant. So, make a temp to store an
2032 * aggregate constant and move each constant value into it. If we
2033 * get lucky, copy propagation will eliminate the extra moves.
2034 */
2035 if (ir->type->base_type == GLSL_TYPE_STRUCT) {
2036 st_src_reg temp_base = get_temp(ir->type);
2037 st_dst_reg temp = st_dst_reg(temp_base);
2038
2039 foreach_iter(exec_list_iterator, iter, ir->components) {
2040 ir_constant *field_value = (ir_constant *)iter.get();
2041 int size = type_size(field_value->type);
2042
2043 assert(size > 0);
2044
2045 field_value->accept(this);
2046 src = this->result;
2047
2048 for (i = 0; i < (unsigned int)size; i++) {
2049 emit(ir, TGSI_OPCODE_MOV, temp, src);
2050
2051 src.index++;
2052 temp.index++;
2053 }
2054 }
2055 this->result = temp_base;
2056 return;
2057 }
2058
2059 if (ir->type->is_array()) {
2060 st_src_reg temp_base = get_temp(ir->type);
2061 st_dst_reg temp = st_dst_reg(temp_base);
2062 int size = type_size(ir->type->fields.array);
2063
2064 assert(size > 0);
2065 in_array++;
2066
2067 for (i = 0; i < ir->type->length; i++) {
2068 ir->array_elements[i]->accept(this);
2069 src = this->result;
2070 for (int j = 0; j < size; j++) {
2071 emit(ir, TGSI_OPCODE_MOV, temp, src);
2072
2073 src.index++;
2074 temp.index++;
2075 }
2076 }
2077 this->result = temp_base;
2078 in_array--;
2079 return;
2080 }
2081
2082 if (ir->type->is_matrix()) {
2083 st_src_reg mat = get_temp(ir->type);
2084 st_dst_reg mat_column = st_dst_reg(mat);
2085
2086 for (i = 0; i < ir->type->matrix_columns; i++) {
2087 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
2088 values = (gl_constant_value *) &ir->value.f[i * ir->type->vector_elements];
2089
2090 src = st_src_reg(file, -1, ir->type->base_type);
2091 src.index = add_constant(file,
2092 values,
2093 ir->type->vector_elements,
2094 GL_FLOAT,
2095 &src.swizzle);
2096 emit(ir, TGSI_OPCODE_MOV, mat_column, src);
2097
2098 mat_column.index++;
2099 }
2100
2101 this->result = mat;
2102 return;
2103 }
2104
2105 switch (ir->type->base_type) {
2106 case GLSL_TYPE_FLOAT:
2107 gl_type = GL_FLOAT;
2108 for (i = 0; i < ir->type->vector_elements; i++) {
2109 values[i].f = ir->value.f[i];
2110 }
2111 break;
2112 case GLSL_TYPE_UINT:
2113 gl_type = native_integers ? GL_UNSIGNED_INT : GL_FLOAT;
2114 for (i = 0; i < ir->type->vector_elements; i++) {
2115 if (native_integers)
2116 values[i].u = ir->value.u[i];
2117 else
2118 values[i].f = ir->value.u[i];
2119 }
2120 break;
2121 case GLSL_TYPE_INT:
2122 gl_type = native_integers ? GL_INT : GL_FLOAT;
2123 for (i = 0; i < ir->type->vector_elements; i++) {
2124 if (native_integers)
2125 values[i].i = ir->value.i[i];
2126 else
2127 values[i].f = ir->value.i[i];
2128 }
2129 break;
2130 case GLSL_TYPE_BOOL:
2131 gl_type = native_integers ? GL_BOOL : GL_FLOAT;
2132 for (i = 0; i < ir->type->vector_elements; i++) {
2133 if (native_integers)
2134 values[i].b = ir->value.b[i];
2135 else
2136 values[i].f = ir->value.b[i];
2137 }
2138 break;
2139 default:
2140 assert(!"Non-float/uint/int/bool constant");
2141 }
2142
2143 this->result = st_src_reg(file, -1, ir->type);
2144 this->result.index = add_constant(file,
2145 values,
2146 ir->type->vector_elements,
2147 gl_type,
2148 &this->result.swizzle);
2149 }
2150
2151 function_entry *
2152 glsl_to_tgsi_visitor::get_function_signature(ir_function_signature *sig)
2153 {
2154 function_entry *entry;
2155
2156 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
2157 entry = (function_entry *)iter.get();
2158
2159 if (entry->sig == sig)
2160 return entry;
2161 }
2162
2163 entry = ralloc(mem_ctx, function_entry);
2164 entry->sig = sig;
2165 entry->sig_id = this->next_signature_id++;
2166 entry->bgn_inst = NULL;
2167
2168 /* Allocate storage for all the parameters. */
2169 foreach_iter(exec_list_iterator, iter, sig->parameters) {
2170 ir_variable *param = (ir_variable *)iter.get();
2171 variable_storage *storage;
2172
2173 storage = find_variable_storage(param);
2174 assert(!storage);
2175
2176 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
2177 this->next_temp);
2178 this->variables.push_tail(storage);
2179
2180 this->next_temp += type_size(param->type);
2181 }
2182
2183 if (!sig->return_type->is_void()) {
2184 entry->return_reg = get_temp(sig->return_type);
2185 } else {
2186 entry->return_reg = undef_src;
2187 }
2188
2189 this->function_signatures.push_tail(entry);
2190 return entry;
2191 }
2192
2193 void
2194 glsl_to_tgsi_visitor::visit(ir_call *ir)
2195 {
2196 glsl_to_tgsi_instruction *call_inst;
2197 ir_function_signature *sig = ir->get_callee();
2198 function_entry *entry = get_function_signature(sig);
2199 int i;
2200
2201 /* Process in parameters. */
2202 exec_list_iterator sig_iter = sig->parameters.iterator();
2203 foreach_iter(exec_list_iterator, iter, *ir) {
2204 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2205 ir_variable *param = (ir_variable *)sig_iter.get();
2206
2207 if (param->mode == ir_var_in ||
2208 param->mode == ir_var_inout) {
2209 variable_storage *storage = find_variable_storage(param);
2210 assert(storage);
2211
2212 param_rval->accept(this);
2213 st_src_reg r = this->result;
2214
2215 st_dst_reg l;
2216 l.file = storage->file;
2217 l.index = storage->index;
2218 l.reladdr = NULL;
2219 l.writemask = WRITEMASK_XYZW;
2220 l.cond_mask = COND_TR;
2221
2222 for (i = 0; i < type_size(param->type); i++) {
2223 emit(ir, TGSI_OPCODE_MOV, l, r);
2224 l.index++;
2225 r.index++;
2226 }
2227 }
2228
2229 sig_iter.next();
2230 }
2231 assert(!sig_iter.has_next());
2232
2233 /* Emit call instruction */
2234 call_inst = emit(ir, TGSI_OPCODE_CAL);
2235 call_inst->function = entry;
2236
2237 /* Process out parameters. */
2238 sig_iter = sig->parameters.iterator();
2239 foreach_iter(exec_list_iterator, iter, *ir) {
2240 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
2241 ir_variable *param = (ir_variable *)sig_iter.get();
2242
2243 if (param->mode == ir_var_out ||
2244 param->mode == ir_var_inout) {
2245 variable_storage *storage = find_variable_storage(param);
2246 assert(storage);
2247
2248 st_src_reg r;
2249 r.file = storage->file;
2250 r.index = storage->index;
2251 r.reladdr = NULL;
2252 r.swizzle = SWIZZLE_NOOP;
2253 r.negate = 0;
2254
2255 param_rval->accept(this);
2256 st_dst_reg l = st_dst_reg(this->result);
2257
2258 for (i = 0; i < type_size(param->type); i++) {
2259 emit(ir, TGSI_OPCODE_MOV, l, r);
2260 l.index++;
2261 r.index++;
2262 }
2263 }
2264
2265 sig_iter.next();
2266 }
2267 assert(!sig_iter.has_next());
2268
2269 /* Process return value. */
2270 this->result = entry->return_reg;
2271 }
2272
2273 void
2274 glsl_to_tgsi_visitor::visit(ir_texture *ir)
2275 {
2276 st_src_reg result_src, coord, lod_info, projector, dx, dy;
2277 st_dst_reg result_dst, coord_dst;
2278 glsl_to_tgsi_instruction *inst = NULL;
2279 unsigned opcode = TGSI_OPCODE_NOP;
2280
2281 ir->coordinate->accept(this);
2282
2283 /* Put our coords in a temp. We'll need to modify them for shadow,
2284 * projection, or LOD, so the only case we'd use it as-is is if
2285 * we're doing plain old texturing. The optimization passes on
2286 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
2287 */
2288 coord = get_temp(glsl_type::vec4_type);
2289 coord_dst = st_dst_reg(coord);
2290 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2291
2292 if (ir->projector) {
2293 ir->projector->accept(this);
2294 projector = this->result;
2295 }
2296
2297 /* Storage for our result. Ideally for an assignment we'd be using
2298 * the actual storage for the result here, instead.
2299 */
2300 result_src = get_temp(glsl_type::vec4_type);
2301 result_dst = st_dst_reg(result_src);
2302
2303 switch (ir->op) {
2304 case ir_tex:
2305 opcode = TGSI_OPCODE_TEX;
2306 break;
2307 case ir_txb:
2308 opcode = TGSI_OPCODE_TXB;
2309 ir->lod_info.bias->accept(this);
2310 lod_info = this->result;
2311 break;
2312 case ir_txl:
2313 opcode = TGSI_OPCODE_TXL;
2314 ir->lod_info.lod->accept(this);
2315 lod_info = this->result;
2316 break;
2317 case ir_txd:
2318 opcode = TGSI_OPCODE_TXD;
2319 ir->lod_info.grad.dPdx->accept(this);
2320 dx = this->result;
2321 ir->lod_info.grad.dPdy->accept(this);
2322 dy = this->result;
2323 break;
2324 case ir_txf: /* TODO: use TGSI_OPCODE_TXF here */
2325 assert(!"GLSL 1.30 features unsupported");
2326 break;
2327 }
2328
2329 if (ir->projector) {
2330 if (opcode == TGSI_OPCODE_TEX) {
2331 /* Slot the projector in as the last component of the coord. */
2332 coord_dst.writemask = WRITEMASK_W;
2333 emit(ir, TGSI_OPCODE_MOV, coord_dst, projector);
2334 coord_dst.writemask = WRITEMASK_XYZW;
2335 opcode = TGSI_OPCODE_TXP;
2336 } else {
2337 st_src_reg coord_w = coord;
2338 coord_w.swizzle = SWIZZLE_WWWW;
2339
2340 /* For the other TEX opcodes there's no projective version
2341 * since the last slot is taken up by LOD info. Do the
2342 * projective divide now.
2343 */
2344 coord_dst.writemask = WRITEMASK_W;
2345 emit(ir, TGSI_OPCODE_RCP, coord_dst, projector);
2346
2347 /* In the case where we have to project the coordinates "by hand,"
2348 * the shadow comparator value must also be projected.
2349 */
2350 st_src_reg tmp_src = coord;
2351 if (ir->shadow_comparitor) {
2352 /* Slot the shadow value in as the second to last component of the
2353 * coord.
2354 */
2355 ir->shadow_comparitor->accept(this);
2356
2357 tmp_src = get_temp(glsl_type::vec4_type);
2358 st_dst_reg tmp_dst = st_dst_reg(tmp_src);
2359
2360 tmp_dst.writemask = WRITEMASK_Z;
2361 emit(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);
2362
2363 tmp_dst.writemask = WRITEMASK_XY;
2364 emit(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
2365 }
2366
2367 coord_dst.writemask = WRITEMASK_XYZ;
2368 emit(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);
2369
2370 coord_dst.writemask = WRITEMASK_XYZW;
2371 coord.swizzle = SWIZZLE_XYZW;
2372 }
2373 }
2374
2375 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the
2376 * shadow comparator was already put in the correct place (and projected)
2377 * by the by-hand projection code above.
2378 */
2379 if (ir->shadow_comparitor && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
2380 /* Slot the shadow value in as the second to last component of the
2381 * coord.
2382 */
2383 ir->shadow_comparitor->accept(this);
2384 coord_dst.writemask = WRITEMASK_Z;
2385 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
2386 coord_dst.writemask = WRITEMASK_XYZW;
2387 }
2388
2389 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB) {
2390 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
2391 coord_dst.writemask = WRITEMASK_W;
2392 emit(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
2393 coord_dst.writemask = WRITEMASK_XYZW;
2394 }
2395
2396 if (opcode == TGSI_OPCODE_TXD)
2397 inst = emit(ir, opcode, result_dst, coord, dx, dy);
2398 else
2399 inst = emit(ir, opcode, result_dst, coord);
2400
2401 if (ir->shadow_comparitor)
2402 inst->tex_shadow = GL_TRUE;
2403
2404 inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler,
2405 this->shader_program,
2406 this->prog);
2407
2408 const glsl_type *sampler_type = ir->sampler->type;
2409
2410 switch (sampler_type->sampler_dimensionality) {
2411 case GLSL_SAMPLER_DIM_1D:
2412 inst->tex_target = (sampler_type->sampler_array)
2413 ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
2414 break;
2415 case GLSL_SAMPLER_DIM_2D:
2416 inst->tex_target = (sampler_type->sampler_array)
2417 ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
2418 break;
2419 case GLSL_SAMPLER_DIM_3D:
2420 inst->tex_target = TEXTURE_3D_INDEX;
2421 break;
2422 case GLSL_SAMPLER_DIM_CUBE:
2423 inst->tex_target = TEXTURE_CUBE_INDEX;
2424 break;
2425 case GLSL_SAMPLER_DIM_RECT:
2426 inst->tex_target = TEXTURE_RECT_INDEX;
2427 break;
2428 case GLSL_SAMPLER_DIM_BUF:
2429 assert(!"FINISHME: Implement ARB_texture_buffer_object");
2430 break;
2431 default:
2432 assert(!"Should not get here.");
2433 }
2434
2435 this->result = result_src;
2436 }
2437
2438 void
2439 glsl_to_tgsi_visitor::visit(ir_return *ir)
2440 {
2441 if (ir->get_value()) {
2442 st_dst_reg l;
2443 int i;
2444
2445 assert(current_function);
2446
2447 ir->get_value()->accept(this);
2448 st_src_reg r = this->result;
2449
2450 l = st_dst_reg(current_function->return_reg);
2451
2452 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
2453 emit(ir, TGSI_OPCODE_MOV, l, r);
2454 l.index++;
2455 r.index++;
2456 }
2457 }
2458
2459 emit(ir, TGSI_OPCODE_RET);
2460 }
2461
2462 void
2463 glsl_to_tgsi_visitor::visit(ir_discard *ir)
2464 {
2465 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
2466
2467 if (ir->condition) {
2468 ir->condition->accept(this);
2469 this->result.negate = ~this->result.negate;
2470 emit(ir, TGSI_OPCODE_KIL, undef_dst, this->result);
2471 } else {
2472 emit(ir, TGSI_OPCODE_KILP);
2473 }
2474
2475 fp->UsesKill = GL_TRUE;
2476 }
2477
2478 void
2479 glsl_to_tgsi_visitor::visit(ir_if *ir)
2480 {
2481 glsl_to_tgsi_instruction *cond_inst, *if_inst;
2482 glsl_to_tgsi_instruction *prev_inst;
2483
2484 prev_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2485
2486 ir->condition->accept(this);
2487 assert(this->result.file != PROGRAM_UNDEFINED);
2488
2489 if (this->options->EmitCondCodes) {
2490 cond_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
2491
2492 /* See if we actually generated any instruction to compute the
2493 * condition. If not, then cook up a move to a temp so we
2494 * have something to set cond_update on.
2495 */
2496 if (cond_inst == prev_inst) {
2497 st_src_reg temp = get_temp(glsl_type::bool_type);
2498 cond_inst = emit(ir->condition, TGSI_OPCODE_MOV, st_dst_reg(temp), result);
2499 }
2500 cond_inst->cond_update = GL_TRUE;
2501
2502 if_inst = emit(ir->condition, TGSI_OPCODE_IF);
2503 if_inst->dst.cond_mask = COND_NE;
2504 } else {
2505 if_inst = emit(ir->condition, TGSI_OPCODE_IF, undef_dst, this->result);
2506 }
2507
2508 this->instructions.push_tail(if_inst);
2509
2510 visit_exec_list(&ir->then_instructions, this);
2511
2512 if (!ir->else_instructions.is_empty()) {
2513 emit(ir->condition, TGSI_OPCODE_ELSE);
2514 visit_exec_list(&ir->else_instructions, this);
2515 }
2516
2517 if_inst = emit(ir->condition, TGSI_OPCODE_ENDIF);
2518 }
2519
2520 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
2521 {
2522 result.file = PROGRAM_UNDEFINED;
2523 next_temp = 1;
2524 next_signature_id = 1;
2525 num_immediates = 0;
2526 current_function = NULL;
2527 num_address_regs = 0;
2528 indirect_addr_temps = false;
2529 indirect_addr_consts = false;
2530 mem_ctx = ralloc_context(NULL);
2531 }
2532
2533 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
2534 {
2535 ralloc_free(mem_ctx);
2536 }
2537
2538 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v)
2539 {
2540 delete v;
2541 }
2542
2543
2544 /**
2545 * Count resources used by the given gpu program (number of texture
2546 * samplers, etc).
2547 */
2548 static void
2549 count_resources(glsl_to_tgsi_visitor *v, gl_program *prog)
2550 {
2551 v->samplers_used = 0;
2552
2553 foreach_iter(exec_list_iterator, iter, v->instructions) {
2554 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2555
2556 if (is_tex_instruction(inst->op)) {
2557 v->samplers_used |= 1 << inst->sampler;
2558
2559 prog->SamplerTargets[inst->sampler] =
2560 (gl_texture_index)inst->tex_target;
2561 if (inst->tex_shadow) {
2562 prog->ShadowSamplers |= 1 << inst->sampler;
2563 }
2564 }
2565 }
2566
2567 prog->SamplersUsed = v->samplers_used;
2568 _mesa_update_shader_textures_used(prog);
2569 }
2570
2571
2572 /**
2573 * Check if the given vertex/fragment/shader program is within the
2574 * resource limits of the context (number of texture units, etc).
2575 * If any of those checks fail, record a linker error.
2576 *
2577 * XXX more checks are needed...
2578 */
2579 static void
2580 check_resources(const struct gl_context *ctx,
2581 struct gl_shader_program *shader_program,
2582 glsl_to_tgsi_visitor *prog,
2583 struct gl_program *proginfo)
2584 {
2585 switch (proginfo->Target) {
2586 case GL_VERTEX_PROGRAM_ARB:
2587 if (_mesa_bitcount(prog->samplers_used) >
2588 ctx->Const.MaxVertexTextureImageUnits) {
2589 fail_link(shader_program, "Too many vertex shader texture samplers");
2590 }
2591 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2592 fail_link(shader_program, "Too many vertex shader constants");
2593 }
2594 break;
2595 case MESA_GEOMETRY_PROGRAM:
2596 if (_mesa_bitcount(prog->samplers_used) >
2597 ctx->Const.MaxGeometryTextureImageUnits) {
2598 fail_link(shader_program, "Too many geometry shader texture samplers");
2599 }
2600 if (proginfo->Parameters->NumParameters >
2601 MAX_GEOMETRY_UNIFORM_COMPONENTS / 4) {
2602 fail_link(shader_program, "Too many geometry shader constants");
2603 }
2604 break;
2605 case GL_FRAGMENT_PROGRAM_ARB:
2606 if (_mesa_bitcount(prog->samplers_used) >
2607 ctx->Const.MaxTextureImageUnits) {
2608 fail_link(shader_program, "Too many fragment shader texture samplers");
2609 }
2610 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) {
2611 fail_link(shader_program, "Too many fragment shader constants");
2612 }
2613 break;
2614 default:
2615 _mesa_problem(ctx, "unexpected program type in check_resources()");
2616 }
2617 }
2618
2619
2620
2621 struct uniform_sort {
2622 struct gl_uniform *u;
2623 int pos;
2624 };
2625
2626 /* The shader_program->Uniforms list is almost sorted in increasing
2627 * uniform->{Frag,Vert}Pos locations, but not quite when there are
2628 * uniforms shared between targets. We need to add parameters in
2629 * increasing order for the targets.
2630 */
2631 static int
2632 sort_uniforms(const void *a, const void *b)
2633 {
2634 struct uniform_sort *u1 = (struct uniform_sort *)a;
2635 struct uniform_sort *u2 = (struct uniform_sort *)b;
2636
2637 return u1->pos - u2->pos;
2638 }
2639
2640 /* Add the uniforms to the parameters. The linker chose locations
2641 * in our parameters lists (which weren't created yet), which the
2642 * uniforms code will use to poke values into our parameters list
2643 * when uniforms are updated.
2644 */
2645 static void
2646 add_uniforms_to_parameters_list(struct gl_shader_program *shader_program,
2647 struct gl_shader *shader,
2648 struct gl_program *prog)
2649 {
2650 unsigned int i;
2651 unsigned int next_sampler = 0, num_uniforms = 0;
2652 struct uniform_sort *sorted_uniforms;
2653
2654 sorted_uniforms = ralloc_array(NULL, struct uniform_sort,
2655 shader_program->Uniforms->NumUniforms);
2656
2657 for (i = 0; i < shader_program->Uniforms->NumUniforms; i++) {
2658 struct gl_uniform *uniform = shader_program->Uniforms->Uniforms + i;
2659 int parameter_index = -1;
2660
2661 switch (shader->Type) {
2662 case GL_VERTEX_SHADER:
2663 parameter_index = uniform->VertPos;
2664 break;
2665 case GL_FRAGMENT_SHADER:
2666 parameter_index = uniform->FragPos;
2667 break;
2668 case GL_GEOMETRY_SHADER:
2669 parameter_index = uniform->GeomPos;
2670 break;
2671 }
2672
2673 /* Only add uniforms used in our target. */
2674 if (parameter_index != -1) {
2675 sorted_uniforms[num_uniforms].pos = parameter_index;
2676 sorted_uniforms[num_uniforms].u = uniform;
2677 num_uniforms++;
2678 }
2679 }
2680
2681 qsort(sorted_uniforms, num_uniforms, sizeof(struct uniform_sort),
2682 sort_uniforms);
2683
2684 for (i = 0; i < num_uniforms; i++) {
2685 struct gl_uniform *uniform = sorted_uniforms[i].u;
2686 int parameter_index = sorted_uniforms[i].pos;
2687 const glsl_type *type = uniform->Type;
2688 unsigned int size;
2689
2690 if (type->is_vector() ||
2691 type->is_scalar()) {
2692 size = type->vector_elements;
2693 } else {
2694 size = type_size(type) * 4;
2695 }
2696
2697 gl_register_file file;
2698 if (type->is_sampler() ||
2699 (type->is_array() && type->fields.array->is_sampler())) {
2700 file = PROGRAM_SAMPLER;
2701 } else {
2702 file = PROGRAM_UNIFORM;
2703 }
2704
2705 GLint index = _mesa_lookup_parameter_index(prog->Parameters, -1,
2706 uniform->Name);
2707
2708 if (index < 0) {
2709 index = _mesa_add_parameter(prog->Parameters, file,
2710 uniform->Name, size, type->gl_type,
2711 NULL, NULL, 0x0);
2712
2713 /* Sampler uniform values are stored in prog->SamplerUnits,
2714 * and the entry in that array is selected by this index we
2715 * store in ParameterValues[].
2716 */
2717 if (file == PROGRAM_SAMPLER) {
2718 for (unsigned int j = 0; j < size / 4; j++)
2719 prog->Parameters->ParameterValues[index + j][0].f = next_sampler++;
2720 }
2721
2722 /* The location chosen in the Parameters list here (returned
2723 * from _mesa_add_parameter above) has to match what the linker chose.
2724 */
2725 if (index != parameter_index) {
2726 fail_link(shader_program, "Allocation of uniform `%s' to target "
2727 "failed (%d vs %d)\n",
2728 uniform->Name, index, parameter_index);
2729 }
2730 }
2731 }
2732
2733 ralloc_free(sorted_uniforms);
2734 }
2735
2736 static void
2737 set_uniform_initializer(struct gl_context *ctx, void *mem_ctx,
2738 struct gl_shader_program *shader_program,
2739 const char *name, const glsl_type *type,
2740 ir_constant *val)
2741 {
2742 if (type->is_record()) {
2743 ir_constant *field_constant;
2744
2745 field_constant = (ir_constant *)val->components.get_head();
2746
2747 for (unsigned int i = 0; i < type->length; i++) {
2748 const glsl_type *field_type = type->fields.structure[i].type;
2749 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
2750 type->fields.structure[i].name);
2751 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name,
2752 field_type, field_constant);
2753 field_constant = (ir_constant *)field_constant->next;
2754 }
2755 return;
2756 }
2757
2758 int loc = _mesa_get_uniform_location(ctx, shader_program, name);
2759
2760 if (loc == -1) {
2761 fail_link(shader_program,
2762 "Couldn't find uniform for initializer %s\n", name);
2763 return;
2764 }
2765
2766 for (unsigned int i = 0; i < (type->is_array() ? type->length : 1); i++) {
2767 ir_constant *element;
2768 const glsl_type *element_type;
2769 if (type->is_array()) {
2770 element = val->array_elements[i];
2771 element_type = type->fields.array;
2772 } else {
2773 element = val;
2774 element_type = type;
2775 }
2776
2777 void *values;
2778
2779 if (element_type->base_type == GLSL_TYPE_BOOL) {
2780 int *conv = ralloc_array(mem_ctx, int, element_type->components());
2781 for (unsigned int j = 0; j < element_type->components(); j++) {
2782 conv[j] = element->value.b[j];
2783 }
2784 values = (void *)conv;
2785 element_type = glsl_type::get_instance(GLSL_TYPE_INT,
2786 element_type->vector_elements,
2787 1);
2788 } else {
2789 values = &element->value;
2790 }
2791
2792 if (element_type->is_matrix()) {
2793 _mesa_uniform_matrix(ctx, shader_program,
2794 element_type->matrix_columns,
2795 element_type->vector_elements,
2796 loc, 1, GL_FALSE, (GLfloat *)values);
2797 loc += element_type->matrix_columns;
2798 } else {
2799 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns,
2800 values, element_type->gl_type);
2801 loc += type_size(element_type);
2802 }
2803 }
2804 }
2805
2806 static void
2807 set_uniform_initializers(struct gl_context *ctx,
2808 struct gl_shader_program *shader_program)
2809 {
2810 void *mem_ctx = NULL;
2811
2812 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2813 struct gl_shader *shader = shader_program->_LinkedShaders[i];
2814
2815 if (shader == NULL)
2816 continue;
2817
2818 foreach_iter(exec_list_iterator, iter, *shader->ir) {
2819 ir_instruction *ir = (ir_instruction *)iter.get();
2820 ir_variable *var = ir->as_variable();
2821
2822 if (!var || var->mode != ir_var_uniform || !var->constant_value)
2823 continue;
2824
2825 if (!mem_ctx)
2826 mem_ctx = ralloc_context(NULL);
2827
2828 set_uniform_initializer(ctx, mem_ctx, shader_program, var->name,
2829 var->type, var->constant_value);
2830 }
2831 }
2832
2833 ralloc_free(mem_ctx);
2834 }
2835
2836 /*
2837 * Scan/rewrite program to remove reads of custom (output) registers.
2838 * The passed type has to be either PROGRAM_OUTPUT or PROGRAM_VARYING
2839 * (for vertex shaders).
2840 * In GLSL shaders, varying vars can be read and written.
2841 * On some hardware, trying to read an output register causes trouble.
2842 * So, rewrite the program to use a temporary register in this case.
2843 *
2844 * Based on _mesa_remove_output_reads from programopt.c.
2845 */
2846 void
2847 glsl_to_tgsi_visitor::remove_output_reads(gl_register_file type)
2848 {
2849 GLuint i;
2850 GLint outputMap[VERT_RESULT_MAX];
2851 GLint outputTypes[VERT_RESULT_MAX];
2852 GLuint numVaryingReads = 0;
2853 GLboolean usedTemps[MAX_TEMPS];
2854 GLuint firstTemp = 0;
2855
2856 _mesa_find_used_registers(prog, PROGRAM_TEMPORARY,
2857 usedTemps, MAX_TEMPS);
2858
2859 assert(type == PROGRAM_VARYING || type == PROGRAM_OUTPUT);
2860 assert(prog->Target == GL_VERTEX_PROGRAM_ARB || type != PROGRAM_VARYING);
2861
2862 for (i = 0; i < VERT_RESULT_MAX; i++)
2863 outputMap[i] = -1;
2864
2865 /* look for instructions which read from varying vars */
2866 foreach_iter(exec_list_iterator, iter, this->instructions) {
2867 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2868 const GLuint numSrc = num_inst_src_regs(inst->op);
2869 GLuint j;
2870 for (j = 0; j < numSrc; j++) {
2871 if (inst->src[j].file == type) {
2872 /* replace the read with a temp reg */
2873 const GLuint var = inst->src[j].index;
2874 if (outputMap[var] == -1) {
2875 numVaryingReads++;
2876 outputMap[var] = _mesa_find_free_register(usedTemps,
2877 MAX_TEMPS,
2878 firstTemp);
2879 outputTypes[var] = inst->src[j].type;
2880 firstTemp = outputMap[var] + 1;
2881 }
2882 inst->src[j].file = PROGRAM_TEMPORARY;
2883 inst->src[j].index = outputMap[var];
2884 }
2885 }
2886 }
2887
2888 if (numVaryingReads == 0)
2889 return; /* nothing to be done */
2890
2891 /* look for instructions which write to the varying vars identified above */
2892 foreach_iter(exec_list_iterator, iter, this->instructions) {
2893 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2894 if (inst->dst.file == type && outputMap[inst->dst.index] >= 0) {
2895 /* change inst to write to the temp reg, instead of the varying */
2896 inst->dst.file = PROGRAM_TEMPORARY;
2897 inst->dst.index = outputMap[inst->dst.index];
2898 }
2899 }
2900
2901 /* insert new MOV instructions at the end */
2902 for (i = 0; i < VERT_RESULT_MAX; i++) {
2903 if (outputMap[i] >= 0) {
2904 /* MOV VAR[i], TEMP[tmp]; */
2905 st_src_reg src = st_src_reg(PROGRAM_TEMPORARY, outputMap[i], outputTypes[i]);
2906 st_dst_reg dst = st_dst_reg(type, WRITEMASK_XYZW, outputTypes[i]);
2907 dst.index = i;
2908 this->emit(NULL, TGSI_OPCODE_MOV, dst, src);
2909 }
2910 }
2911 }
2912
2913 /**
2914 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
2915 * are read from the given src in this instruction
2916 */
2917 static int
2918 get_src_arg_mask(st_dst_reg dst, st_src_reg src)
2919 {
2920 int read_mask = 0, comp;
2921
2922 /* Now, given the src swizzle and the written channels, find which
2923 * components are actually read
2924 */
2925 for (comp = 0; comp < 4; ++comp) {
2926 const unsigned coord = GET_SWZ(src.swizzle, comp);
2927 ASSERT(coord < 4);
2928 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
2929 read_mask |= 1 << coord;
2930 }
2931
2932 return read_mask;
2933 }
2934
2935 /**
2936 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
2937 * instruction is the first instruction to write to register T0. There are
2938 * several lowering passes done in GLSL IR (e.g. branches and
2939 * relative addressing) that create a large number of conditional assignments
2940 * that glsl_to_tgsi converts to CMP instructions like the one mentioned above.
2941 *
2942 * Here is why this conversion is safe:
2943 * CMP T0, T1 T2 T0 can be expanded to:
2944 * if (T1 < 0.0)
2945 * MOV T0, T2;
2946 * else
2947 * MOV T0, T0;
2948 *
2949 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
2950 * as the original program. If (T1 < 0.0) evaluates to false, executing
2951 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
2952 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
2953 * because any instruction that was going to read from T0 after this was going
2954 * to read a garbage value anyway.
2955 */
2956 void
2957 glsl_to_tgsi_visitor::simplify_cmp(void)
2958 {
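   /* Per-register bitmasks of the destination channels that have already been
    * written by earlier instructions.
    */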
2959 unsigned tempWrites[MAX_TEMPS];
2960 unsigned outputWrites[MAX_PROGRAM_OUTPUTS];
2961
2962 memset(tempWrites, 0, sizeof(tempWrites));
2963 memset(outputWrites, 0, sizeof(outputWrites));
2964
2965 foreach_iter(exec_list_iterator, iter, this->instructions) {
2966 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
2967 unsigned prevWriteMask = 0;
2968
2969 /* Give up if we encounter relative addressing or flow control. */
2970 if (inst->dst.reladdr ||
2971 tgsi_get_opcode_info(inst->op)->is_branch ||
2972 inst->op == TGSI_OPCODE_BGNSUB ||
2973 inst->op == TGSI_OPCODE_CONT ||
2974 inst->op == TGSI_OPCODE_END ||
2975 inst->op == TGSI_OPCODE_ENDSUB ||
2976 inst->op == TGSI_OPCODE_RET) {
2977 return;
2978 }
2979
2980 if (inst->dst.file == PROGRAM_OUTPUT) {
2981 assert(inst->dst.index < MAX_PROGRAM_OUTPUTS);
2982 prevWriteMask = outputWrites[inst->dst.index];
2983 outputWrites[inst->dst.index] |= inst->dst.writemask;
2984 } else if (inst->dst.file == PROGRAM_TEMPORARY) {
2985 assert(inst->dst.index < MAX_TEMPS);
2986 prevWriteMask = tempWrites[inst->dst.index];
2987 tempWrites[inst->dst.index] |= inst->dst.writemask;
2988 }
2989
2990 /* For a CMP to be considered a conditional write, the destination
2991 * register and the third source register (src[2]) must be the same. */
2992 if (inst->op == TGSI_OPCODE_CMP
2993 && !(inst->dst.writemask & prevWriteMask)
2994 && inst->src[2].file == inst->dst.file
2995 && inst->src[2].index == inst->dst.index
2996 && inst->dst.writemask == get_src_arg_mask(inst->dst, inst->src[2])) {
2997
2998 inst->op = TGSI_OPCODE_MOV;
2999 inst->src[0] = inst->src[1];
3000 }
3001 }
3002 }
3003
3004 /* Replaces all references to a temporary register index with another index. */
3005 void
3006 glsl_to_tgsi_visitor::rename_temp_register(int index, int new_index)
3007 {
3008 foreach_iter(exec_list_iterator, iter, this->instructions) {
3009 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3010 unsigned j;
3011
3012 for (j=0; j < num_inst_src_regs(inst->op); j++) {
3013 if (inst->src[j].file == PROGRAM_TEMPORARY &&
3014 inst->src[j].index == index) {
3015 inst->src[j].index = new_index;
3016 }
3017 }
3018
3019 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
3020 inst->dst.index = new_index;
3021 }
3022 }
3023 }
3024
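/**
 * Returns the index of the first instruction that reads the given temporary
 * register, or the index of the enclosing outermost BGNLOOP if that read
 * happens inside a loop.  Returns -1 if the temporary is never read.
 */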
3025 int
3026 glsl_to_tgsi_visitor::get_first_temp_read(int index)
3027 {
3028 int depth = 0; /* loop depth */
3029 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
3030 unsigned i = 0, j;
3031
3032 foreach_iter(exec_list_iterator, iter, this->instructions) {
3033 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3034
3035 for (j=0; j < num_inst_src_regs(inst->op); j++) {
3036 if (inst->src[j].file == PROGRAM_TEMPORARY &&
3037 inst->src[j].index == index) {
3038 return (depth == 0) ? i : loop_start;
3039 }
3040 }
3041
3042 if (inst->op == TGSI_OPCODE_BGNLOOP) {
3043 if(depth++ == 0)
3044 loop_start = i;
3045 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
3046 if (--depth == 0)
3047 loop_start = -1;
3048 }
3049 assert(depth >= 0);
3050
3051 i++;
3052 }
3053
3054 return -1;
3055 }
3056
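/**
 * Like get_first_temp_read(), but returns the index of the first instruction
 * that writes to the given temporary register.
 */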
3057 int
3058 glsl_to_tgsi_visitor::get_first_temp_write(int index)
3059 {
3060 int depth = 0; /* loop depth */
3061 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
3062 int i = 0;
3063
3064 foreach_iter(exec_list_iterator, iter, this->instructions) {
3065 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3066
3067 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
3068 return (depth == 0) ? i : loop_start;
3069 }
3070
3071 if (inst->op == TGSI_OPCODE_BGNLOOP) {
3072 if(depth++ == 0)
3073 loop_start = i;
3074 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
3075 if (--depth == 0)
3076 loop_start = -1;
3077 }
3078 assert(depth >= 0);
3079
3080 i++;
3081 }
3082
3083 return -1;
3084 }
3085
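/**
 * Returns the index of the last instruction that reads the given temporary
 * register.  A read inside a loop is counted at the matching outermost
 * ENDLOOP.  Returns -1 if the temporary is never read.
 */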
3086 int
3087 glsl_to_tgsi_visitor::get_last_temp_read(int index)
3088 {
3089 int depth = 0; /* loop depth */
3090 int last = -1; /* index of last instruction that reads the temporary */
3091 unsigned i = 0, j;
3092
3093 foreach_iter(exec_list_iterator, iter, this->instructions) {
3094 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3095
3096 for (j=0; j < num_inst_src_regs(inst->op); j++) {
3097 if (inst->src[j].file == PROGRAM_TEMPORARY &&
3098 inst->src[j].index == index) {
3099 last = (depth == 0) ? i : -2;
3100 }
3101 }
3102
3103 if (inst->op == TGSI_OPCODE_BGNLOOP)
3104 depth++;
3105 else if (inst->op == TGSI_OPCODE_ENDLOOP)
3106 if (--depth == 0 && last == -2)
3107 last = i;
3108 assert(depth >= 0);
3109
3110 i++;
3111 }
3112
3113 assert(last >= -1);
3114 return last;
3115 }
3116
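/**
 * Like get_last_temp_read(), but for the last instruction that writes to the
 * given temporary register.
 */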
3117 int
3118 glsl_to_tgsi_visitor::get_last_temp_write(int index)
3119 {
3120 int depth = 0; /* loop depth */
3121 int last = -1; /* index of last instruction that writes to the temporary */
3122 int i = 0;
3123
3124 foreach_iter(exec_list_iterator, iter, this->instructions) {
3125 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3126
3127 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index)
3128 last = (depth == 0) ? i : -2;
3129
3130 if (inst->op == TGSI_OPCODE_BGNLOOP)
3131 depth++;
3132 else if (inst->op == TGSI_OPCODE_ENDLOOP)
3133 if (--depth == 0 && last == -2)
3134 last = i;
3135 assert(depth >= 0);
3136
3137 i++;
3138 }
3139
3140 assert(last >= -1);
3141 return last;
3142 }
3143
3144 /*
3145 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
3146 * channels for copy propagation and updates following instructions to
3147 * use the original versions.
3148 *
3149 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3150 * will occur. As an example, a TXP production before this pass:
3151 *
3152 * 0: MOV TEMP[1], INPUT[4].xyyy;
3153 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3154 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
3155 *
3156 * and after:
3157 *
3158 * 0: MOV TEMP[1], INPUT[4].xyyy;
3159 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3160 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3161 *
3162 * which allows for dead code elimination on TEMP[1]'s writes.
3163 */
3164 void
3165 glsl_to_tgsi_visitor::copy_propagate(void)
3166 {
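   /* acp[4 * temp_index + channel] points to the MOV whose destination last
    * wrote that channel and is still a valid copy to propagate from;
    * acp_level records the control-flow nesting level of that entry.
    */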
3167 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx,
3168 glsl_to_tgsi_instruction *,
3169 this->next_temp * 4);
3170 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
3171 int level = 0;
3172
3173 foreach_iter(exec_list_iterator, iter, this->instructions) {
3174 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3175
3176 assert(inst->dst.file != PROGRAM_TEMPORARY
3177 || inst->dst.index < this->next_temp);
3178
3179 /* First, do any copy propagation possible into the src regs. */
3180 for (int r = 0; r < 3; r++) {
3181 glsl_to_tgsi_instruction *first = NULL;
3182 bool good = true;
3183 int acp_base = inst->src[r].index * 4;
3184
3185 if (inst->src[r].file != PROGRAM_TEMPORARY ||
3186 inst->src[r].reladdr)
3187 continue;
3188
3189 /* See if we can find entries in the ACP consisting of MOVs
3190 * from the same src register for all the swizzled channels
3191 * of this src register reference.
3192 */
3193 for (int i = 0; i < 4; i++) {
3194 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3195 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan];
3196
3197 if (!copy_chan) {
3198 good = false;
3199 break;
3200 }
3201
3202 assert(acp_level[acp_base + src_chan] <= level);
3203
3204 if (!first) {
3205 first = copy_chan;
3206 } else {
3207 if (first->src[0].file != copy_chan->src[0].file ||
3208 first->src[0].index != copy_chan->src[0].index) {
3209 good = false;
3210 break;
3211 }
3212 }
3213 }
3214
3215 if (good) {
3216 /* We've now validated that we can copy-propagate to
3217 * replace this src register reference. Do it.
3218 */
3219 inst->src[r].file = first->src[0].file;
3220 inst->src[r].index = first->src[0].index;
3221
3222 int swizzle = 0;
3223 for (int i = 0; i < 4; i++) {
3224 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
3225 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan];
3226 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) <<
3227 (3 * i));
3228 }
3229 inst->src[r].swizzle = swizzle;
3230 }
3231 }
3232
3233 switch (inst->op) {
3234 case TGSI_OPCODE_BGNLOOP:
3235 case TGSI_OPCODE_ENDLOOP:
3236 /* End of a basic block, clear the ACP entirely. */
3237 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3238 break;
3239
3240 case TGSI_OPCODE_IF:
3241 ++level;
3242 break;
3243
3244 case TGSI_OPCODE_ENDIF:
3245 case TGSI_OPCODE_ELSE:
3246 /* Clear all channels written inside the block from the ACP, but
3247 * leave those that were not touched.
3248 */
3249 for (int r = 0; r < this->next_temp; r++) {
3250 for (int c = 0; c < 4; c++) {
3251 if (!acp[4 * r + c])
3252 continue;
3253
3254 if (acp_level[4 * r + c] >= level)
3255 acp[4 * r + c] = NULL;
3256 }
3257 }
3258 if (inst->op == TGSI_OPCODE_ENDIF)
3259 --level;
3260 break;
3261
3262 default:
3263 /* Continuing the block, clear any written channels from
3264 * the ACP.
3265 */
3266 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) {
3267 /* Any temporary might be written, so no copy propagation
3268 * across this instruction.
3269 */
3270 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
3271 } else if (inst->dst.file == PROGRAM_OUTPUT &&
3272 inst->dst.reladdr) {
3273 /* Any output might be written, so no copy propagation
3274 * from outputs across this instruction.
3275 */
3276 for (int r = 0; r < this->next_temp; r++) {
3277 for (int c = 0; c < 4; c++) {
3278 if (!acp[4 * r + c])
3279 continue;
3280
3281 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
3282 acp[4 * r + c] = NULL;
3283 }
3284 }
3285 } else if (inst->dst.file == PROGRAM_TEMPORARY ||
3286 inst->dst.file == PROGRAM_OUTPUT) {
3287 /* Clear where it's used as dst. */
3288 if (inst->dst.file == PROGRAM_TEMPORARY) {
3289 for (int c = 0; c < 4; c++) {
3290 if (inst->dst.writemask & (1 << c)) {
3291 acp[4 * inst->dst.index + c] = NULL;
3292 }
3293 }
3294 }
3295
3296 /* Clear where it's used as src. */
3297 for (int r = 0; r < this->next_temp; r++) {
3298 for (int c = 0; c < 4; c++) {
3299 if (!acp[4 * r + c])
3300 continue;
3301
3302 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
3303
3304 if (acp[4 * r + c]->src[0].file == inst->dst.file &&
3305 acp[4 * r + c]->src[0].index == inst->dst.index &&
3306 inst->dst.writemask & (1 << src_chan))
3307 {
3308 acp[4 * r + c] = NULL;
3309 }
3310 }
3311 }
3312 }
3313 break;
3314 }
3315
3316 /* If this is a copy, add it to the ACP. */
3317 if (inst->op == TGSI_OPCODE_MOV &&
3318 inst->dst.file == PROGRAM_TEMPORARY &&
3319 !inst->dst.reladdr &&
3320 !inst->saturate &&
3321 !inst->src[0].reladdr &&
3322 !inst->src[0].negate) {
3323 for (int i = 0; i < 4; i++) {
3324 if (inst->dst.writemask & (1 << i)) {
3325 acp[4 * inst->dst.index + i] = inst;
3326 acp_level[4 * inst->dst.index + i] = level;
3327 }
3328 }
3329 }
3330 }
3331
3332 ralloc_free(acp_level);
3333 ralloc_free(acp);
3334 }
3335
3336 /*
3337 * Tracks available PROGRAM_TEMPORARY registers for dead code elimination.
3338 *
3339 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3340 * will occur. As an example, a TXP production after copy propagation but
3341 * before this pass:
3342 *
3343 * 0: MOV TEMP[1], INPUT[4].xyyy;
3344 * 1: MOV TEMP[1].w, INPUT[4].wwww;
3345 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3346 *
3347 * and after this pass:
3348 *
3349 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
3350 *
3351 * FIXME: assumes that all functions are inlined (no support for BGNSUB/ENDSUB)
3352 * FIXME: doesn't eliminate all dead code inside of loops; it steps around them
3353 */
3354 void
3355 glsl_to_tgsi_visitor::eliminate_dead_code(void)
3356 {
3357 int i;
3358
3359 for (i=0; i < this->next_temp; i++) {
3360 int last_read = get_last_temp_read(i);
3361 int j = 0;
3362
3363 foreach_iter(exec_list_iterator, iter, this->instructions) {
3364 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3365
3366 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == i &&
3367 j > last_read)
3368 {
3369 iter.remove();
3370 delete inst;
3371 }
3372
3373 j++;
3374 }
3375 }
3376 }
3377
3378 /*
3379 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
3380 * code elimination. This is less primitive than eliminate_dead_code(), as it
3381 * is per-channel and can detect consecutive writes without a read between them
3382 * as dead code. However, there is some dead code that can be eliminated by
3383 * eliminate_dead_code() but not this function - for example, this function
3384 * cannot eliminate an instruction writing to a register that is never read and
3385 * is the only instruction writing to that register.
3386 *
3387 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
3388 * will occur.
3389 */
3390 int
3391 glsl_to_tgsi_visitor::eliminate_dead_code_advanced(void)
3392 {
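   /* writes[4 * temp_index + channel] points to the last instruction that
    * wrote that channel without an intervening read; write_level records the
    * control-flow nesting level at which that write happened.
    */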
3393 glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx,
3394 glsl_to_tgsi_instruction *,
3395 this->next_temp * 4);
3396 int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
3397 int level = 0;
3398 int removed = 0;
3399
3400 foreach_iter(exec_list_iterator, iter, this->instructions) {
3401 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3402
3403 assert(inst->dst.file != PROGRAM_TEMPORARY
3404 || inst->dst.index < this->next_temp);
3405
3406 switch (inst->op) {
3407 case TGSI_OPCODE_BGNLOOP:
3408 case TGSI_OPCODE_ENDLOOP:
3409 /* End of a basic block, clear the write array entirely.
3410 * FIXME: This keeps us from killing dead code when the writes are
3411 * on either side of a loop, even when the register isn't touched
3412 * inside the loop.
3413 */
3414 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
3415 break;
3416
3417 case TGSI_OPCODE_ENDIF:
3418 --level;
3419 break;
3420
3421 case TGSI_OPCODE_ELSE:
3422 /* Clear all channels written inside the preceding if block from the
3423 * write array, but leave those that were not touched.
3424 *
3425 * FIXME: This destroys opportunities to remove dead code inside of
3426 * IF blocks that are followed by an ELSE block.
3427 */
3428 for (int r = 0; r < this->next_temp; r++) {
3429 for (int c = 0; c < 4; c++) {
3430 if (!writes[4 * r + c])
3431 continue;
3432
3433 if (write_level[4 * r + c] >= level)
3434 writes[4 * r + c] = NULL;
3435 }
3436 }
3437 break;
3438
3439 case TGSI_OPCODE_IF:
3440 ++level;
3441 /* fallthrough to default case to mark the condition as read */
3442
3443 default:
3444 /* Continuing the block, clear any channels from the write array that
3445 * are read by this instruction.
3446 */
3447 for (unsigned i = 0; i < Elements(inst->src); i++) {
3448 if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){
3449 /* Any temporary might be read, so no dead code elimination
3450 * across this instruction.
3451 */
3452 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
3453 } else if (inst->src[i].file == PROGRAM_TEMPORARY) {
3454 /* Clear where it's used as src. */
3455 int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0);
3456 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1);
3457 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2);
3458 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3);
3459
3460 for (int c = 0; c < 4; c++) {
3461 if (src_chans & (1 << c)) {
3462 writes[4 * inst->src[i].index + c] = NULL;
3463 }
3464 }
3465 }
3466 }
3467 break;
3468 }
3469
3470 /* If this instruction writes to a temporary, add it to the write array.
3471 * If there is already an instruction in the write array for one or more
3472 * of the channels, flag that channel write as dead.
3473 */
3474 if (inst->dst.file == PROGRAM_TEMPORARY &&
3475 !inst->dst.reladdr &&
3476 !inst->saturate) {
3477 for (int c = 0; c < 4; c++) {
3478 if (inst->dst.writemask & (1 << c)) {
3479 if (writes[4 * inst->dst.index + c]) {
3480 if (write_level[4 * inst->dst.index + c] < level)
3481 continue;
3482 else
3483 writes[4 * inst->dst.index + c]->dead_mask |= (1 << c);
3484 }
3485 writes[4 * inst->dst.index + c] = inst;
3486 write_level[4 * inst->dst.index + c] = level;
3487 }
3488 }
3489 }
3490 }
3491
3492 /* Anything still in the write array at this point is dead code. */
3493 for (int r = 0; r < this->next_temp; r++) {
3494 for (int c = 0; c < 4; c++) {
3495 glsl_to_tgsi_instruction *inst = writes[4 * r + c];
3496 if (inst)
3497 inst->dead_mask |= (1 << c);
3498 }
3499 }
3500
3501 /* Now actually remove the instructions that are completely dead and update
3502 * the writemask of other instructions with dead channels.
3503 */
3504 foreach_iter(exec_list_iterator, iter, this->instructions) {
3505 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3506
3507 if (!inst->dead_mask || !inst->dst.writemask)
3508 continue;
3509 else if (inst->dead_mask == inst->dst.writemask) {
3510 iter.remove();
3511 delete inst;
3512 removed++;
3513 } else
3514 inst->dst.writemask &= ~(inst->dead_mask);
3515 }
3516
3517 ralloc_free(write_level);
3518 ralloc_free(writes);
3519
3520 return removed;
3521 }
3522
3523 /* Merges temporary registers together where possible to reduce the number of
3524 * registers needed to run a program.
3525 *
3526 * Produces optimal code only after copy propagation and dead code elimination
3527 * have been run. */
3528 void
3529 glsl_to_tgsi_visitor::merge_registers(void)
3530 {
3531 int *last_reads = rzalloc_array(mem_ctx, int, this->next_temp);
3532 int *first_writes = rzalloc_array(mem_ctx, int, this->next_temp);
3533 int i, j;
3534
3535 /* Read the indices of the last read and first write to each temp register
3536 * into an array so that we don't have to traverse the instruction list as
3537 * much. */
3538 for (i=0; i < this->next_temp; i++) {
3539 last_reads[i] = get_last_temp_read(i);
3540 first_writes[i] = get_first_temp_write(i);
3541 }
3542
3543 /* Start looking for registers with non-overlapping usages that can be
3544 * merged together. */
3545 for (i=0; i < this->next_temp; i++) {
3546 /* Don't touch unused registers. */
3547 if (last_reads[i] < 0 || first_writes[i] < 0) continue;
3548
3549 for (j=0; j < this->next_temp; j++) {
3550 /* Don't touch unused registers. */
3551 if (last_reads[j] < 0 || first_writes[j] < 0) continue;
3552
3553 /* We can merge the two registers if the first write to j is after or
3554 * in the same instruction as the last read from i. Note that the
3555 * register at index i will always be used earlier or at the same time
3556 * as the register at index j. */
3557 if (first_writes[i] <= first_writes[j] &&
3558 last_reads[i] <= first_writes[j])
3559 {
3560 rename_temp_register(j, i); /* Replace all references to j with i.*/
3561
3562 /* Update the first_writes and last_reads arrays with the new
3563 * values for the merged register index, and mark the newly unused
3564 * register index as such. */
3565 last_reads[i] = last_reads[j];
3566 first_writes[j] = -1;
3567 last_reads[j] = -1;
3568 }
3569 }
3570 }
3571
3572 ralloc_free(last_reads);
3573 ralloc_free(first_writes);
3574 }
3575
3576 /* Reassign indices to temporary registers by reusing unused indices created
3577 * by optimization passes. */
3578 void
3579 glsl_to_tgsi_visitor::renumber_registers(void)
3580 {
3581 int i = 0;
3582 int new_index = 0;
3583
3584 for (i=0; i < this->next_temp; i++) {
3585 if (get_first_temp_read(i) < 0) continue;
3586 if (i != new_index)
3587 rename_temp_register(i, new_index);
3588 new_index++;
3589 }
3590
3591 this->next_temp = new_index;
3592 }
3593
3594 /**
3595 * Returns a fragment program which implements the current pixel transfer ops.
3596 * Based on get_pixel_transfer_program in st_atom_pixeltransfer.c.
3597 */
3598 extern "C" void
3599 get_pixel_transfer_visitor(struct st_fragment_program *fp,
3600 glsl_to_tgsi_visitor *original,
3601 int scale_and_bias, int pixel_maps)
3602 {
3603 glsl_to_tgsi_visitor *v = new glsl_to_tgsi_visitor();
3604 struct st_context *st = st_context(original->ctx);
3605 struct gl_program *prog = &fp->Base.Base;
3606 struct gl_program_parameter_list *params = _mesa_new_parameter_list();
3607 st_src_reg coord, src0;
3608 st_dst_reg dst0;
3609 glsl_to_tgsi_instruction *inst;
3610
3611 /* Copy attributes of the glsl_to_tgsi_visitor in the original shader. */
3612 v->ctx = original->ctx;
3613 v->prog = prog;
3614 v->glsl_version = original->glsl_version;
3615 v->native_integers = original->native_integers;
3616 v->options = original->options;
3617 v->next_temp = original->next_temp;
3618 v->num_address_regs = original->num_address_regs;
3619 v->samplers_used = prog->SamplersUsed = original->samplers_used;
3620 v->indirect_addr_temps = original->indirect_addr_temps;
3621 v->indirect_addr_consts = original->indirect_addr_consts;
3622 memcpy(&v->immediates, &original->immediates, sizeof(v->immediates));
3623
3624 /*
3625 * Get initial pixel color from the texture.
3626 * TEX colorTemp, fragment.texcoord[0], texture[0], 2D;
3627 */
3628 coord = st_src_reg(PROGRAM_INPUT, FRAG_ATTRIB_TEX0, glsl_type::vec2_type);
3629 src0 = v->get_temp(glsl_type::vec4_type);
3630 dst0 = st_dst_reg(src0);
3631 inst = v->emit(NULL, TGSI_OPCODE_TEX, dst0, coord);
3632 inst->sampler = 0;
3633 inst->tex_target = TEXTURE_2D_INDEX;
3634
3635 prog->InputsRead |= (1 << FRAG_ATTRIB_TEX0);
3636 prog->SamplersUsed |= (1 << 0); /* mark sampler 0 as used */
3637 v->samplers_used |= (1 << 0);
3638
3639 if (scale_and_bias) {
3640 static const gl_state_index scale_state[STATE_LENGTH] =
3641 { STATE_INTERNAL, STATE_PT_SCALE,
3642 (gl_state_index) 0, (gl_state_index) 0, (gl_state_index) 0 };
3643 static const gl_state_index bias_state[STATE_LENGTH] =
3644 { STATE_INTERNAL, STATE_PT_BIAS,
3645 (gl_state_index) 0, (gl_state_index) 0, (gl_state_index) 0 };
3646 GLint scale_p, bias_p;
3647 st_src_reg scale, bias;
3648
3649 scale_p = _mesa_add_state_reference(params, scale_state);
3650 bias_p = _mesa_add_state_reference(params, bias_state);
3651
3652 /* MAD colorTemp, colorTemp, scale, bias; */
3653 scale = st_src_reg(PROGRAM_STATE_VAR, scale_p, GLSL_TYPE_FLOAT);
3654 bias = st_src_reg(PROGRAM_STATE_VAR, bias_p, GLSL_TYPE_FLOAT);
3655 inst = v->emit(NULL, TGSI_OPCODE_MAD, dst0, src0, scale, bias);
3656 }
3657
3658 if (pixel_maps) {
3659 st_src_reg temp = v->get_temp(glsl_type::vec4_type);
3660 st_dst_reg temp_dst = st_dst_reg(temp);
3661
3662 assert(st->pixel_xfer.pixelmap_texture);
3663
3664 /* With a little effort, we can do four pixel map look-ups with
3665 * two TEX instructions:
3666 */
3667
3668 /* TEX temp.rg, colorTemp.rgba, texture[1], 2D; */
3669 temp_dst.writemask = WRITEMASK_XY; /* write R,G */
3670 inst = v->emit(NULL, TGSI_OPCODE_TEX, temp_dst, src0);
3671 inst->sampler = 1;
3672 inst->tex_target = TEXTURE_2D_INDEX;
3673
3674 /* TEX temp.ba, colorTemp.baba, texture[1], 2D; */
3675 src0.swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W);
3676 temp_dst.writemask = WRITEMASK_ZW; /* write B,A */
3677 inst = v->emit(NULL, TGSI_OPCODE_TEX, temp_dst, src0);
3678 inst->sampler = 1;
3679 inst->tex_target = TEXTURE_2D_INDEX;
3680
3681 prog->SamplersUsed |= (1 << 1); /* mark sampler 1 as used */
3682 v->samplers_used |= (1 << 1);
3683
3684 /* MOV colorTemp, temp; */
3685 inst = v->emit(NULL, TGSI_OPCODE_MOV, dst0, temp);
3686 }
3687
3688 /* Now copy the instructions from the original glsl_to_tgsi_visitor into the
3689 * new visitor. */
3690 foreach_iter(exec_list_iterator, iter, original->instructions) {
3691 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3692 st_src_reg src_regs[3];
3693
3694 if (inst->dst.file == PROGRAM_OUTPUT)
3695 prog->OutputsWritten |= BITFIELD64_BIT(inst->dst.index);
3696
3697 for (int i=0; i<3; i++) {
3698 src_regs[i] = inst->src[i];
3699 if (src_regs[i].file == PROGRAM_INPUT &&
3700 src_regs[i].index == FRAG_ATTRIB_COL0)
3701 {
3702 src_regs[i].file = PROGRAM_TEMPORARY;
3703 src_regs[i].index = src0.index;
3704 }
3705 else if (src_regs[i].file == PROGRAM_INPUT)
3706 prog->InputsRead |= (1 << src_regs[i].index);
3707 }
3708
3709 v->emit(NULL, inst->op, inst->dst, src_regs[0], src_regs[1], src_regs[2]);
3710 }
3711
3712 /* Make modifications to fragment program info. */
3713 prog->Parameters = _mesa_combine_parameter_lists(params,
3714 original->prog->Parameters);
3715 prog->Attributes = _mesa_clone_parameter_list(original->prog->Attributes);
3716 prog->Varying = _mesa_clone_parameter_list(original->prog->Varying);
3717 _mesa_free_parameter_list(params);
3718 count_resources(v, prog);
3719 fp->glsl_to_tgsi = v;
3720 }
3721
3722 /**
3723 * Make fragment program for glBitmap:
3724 * Sample the texture and kill the fragment if the bit is 0.
3725 * This program will be combined with the user's fragment program.
3726 *
3727 * Based on make_bitmap_fragment_program in st_cb_bitmap.c.
3728 */
3729 extern "C" void
3730 get_bitmap_visitor(struct st_fragment_program *fp,
3731 glsl_to_tgsi_visitor *original, int samplerIndex)
3732 {
3733 glsl_to_tgsi_visitor *v = new glsl_to_tgsi_visitor();
3734 struct st_context *st = st_context(original->ctx);
3735 struct gl_program *prog = &fp->Base.Base;
3736 st_src_reg coord, src0;
3737 st_dst_reg dst0;
3738 glsl_to_tgsi_instruction *inst;
3739
3740 /* Copy attributes from the original shader's glsl_to_tgsi_visitor. */
3741 v->ctx = original->ctx;
3742 v->prog = prog;
3743 v->glsl_version = original->glsl_version;
3744 v->native_integers = original->native_integers;
3745 v->options = original->options;
3746 v->next_temp = original->next_temp;
3747 v->num_address_regs = original->num_address_regs;
3748 v->samplers_used = prog->SamplersUsed = original->samplers_used;
3749 v->indirect_addr_temps = original->indirect_addr_temps;
3750 v->indirect_addr_consts = original->indirect_addr_consts;
3751 memcpy(&v->immediates, &original->immediates, sizeof(v->immediates));
3752
3753 /* TEX tmp0, fragment.texcoord[0], texture[0], 2D; */
3754 coord = st_src_reg(PROGRAM_INPUT, FRAG_ATTRIB_TEX0, glsl_type::vec2_type);
3755 src0 = v->get_temp(glsl_type::vec4_type);
3756 dst0 = st_dst_reg(src0);
3757 inst = v->emit(NULL, TGSI_OPCODE_TEX, dst0, coord);
3758 inst->sampler = samplerIndex;
3759 inst->tex_target = TEXTURE_2D_INDEX;
3760
3761 prog->InputsRead |= (1 << FRAG_ATTRIB_TEX0);
3762 prog->SamplersUsed |= (1 << samplerIndex); /* mark sampler as used */
3763 v->samplers_used |= (1 << samplerIndex);
3764
3765 /* KIL if -tmp0 < 0 # texel == 0 -> keep / texel != 0 -> discard */
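/* TGSI KIL discards the fragment if any source component is negative, so
 * negating tmp0 kills exactly the fragments whose texel value is nonzero. */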
3766 src0.negate = NEGATE_XYZW;
3767 if (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM)
3768 src0.swizzle = SWIZZLE_XXXX;
3769 inst = v->emit(NULL, TGSI_OPCODE_KIL, undef_dst, src0);
3770
3771 /* Now copy the instructions from the original glsl_to_tgsi_visitor into the
3772 * new visitor. */
3773 foreach_iter(exec_list_iterator, iter, original->instructions) {
3774 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
3775 st_src_reg src_regs[3];
3776
3777 if (inst->dst.file == PROGRAM_OUTPUT)
3778 prog->OutputsWritten |= BITFIELD64_BIT(inst->dst.index);
3779
3780 for (int i=0; i<3; i++) {
3781 src_regs[i] = inst->src[i];
3782 if (src_regs[i].file == PROGRAM_INPUT)
3783 prog->InputsRead |= (1 << src_regs[i].index);
3784 }
3785
3786 v->emit(NULL, inst->op, inst->dst, src_regs[0], src_regs[1], src_regs[2]);
3787 }
3788
3789 /* Make modifications to fragment program info. */
3790 prog->Parameters = _mesa_clone_parameter_list(original->prog->Parameters);
3791 prog->Attributes = _mesa_clone_parameter_list(original->prog->Attributes);
3792 prog->Varying = _mesa_clone_parameter_list(original->prog->Varying);
3793 count_resources(v, prog);
3794 fp->glsl_to_tgsi = v;
3795 }
3796
3797 /* ------------------------- TGSI conversion stuff -------------------------- */
3798 struct label {
3799 unsigned branch_target;
3800 unsigned token;
3801 };
3802
3803 /**
3804 * Intermediate state used during shader translation.
3805 */
3806 struct st_translate {
3807 struct ureg_program *ureg;
3808
3809 struct ureg_dst temps[MAX_TEMPS];
3810 struct ureg_src *constants;
3811 struct ureg_src *immediates;
3812 struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
3813 struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
3814 struct ureg_dst address[1];
3815 struct ureg_src samplers[PIPE_MAX_SAMPLERS];
3816 struct ureg_src systemValues[SYSTEM_VALUE_MAX];
3817
3818 /* Extra info for handling point size clamping in vertex shader */
3819 struct ureg_dst pointSizeResult; /**< Actual point size output register */
3820 struct ureg_src pointSizeConst; /**< Point size range constant register */
3821 GLint pointSizeOutIndex; /**< Temp point size output register */
3822 GLboolean prevInstWrotePointSize;
3823
3824 const GLuint *inputMapping;
3825 const GLuint *outputMapping;
3826
3827 /* For every instruction that contains a label (e.g. CALL), keep
3828 * details so that we can go back afterwards and emit the correct
3829 * tgsi instruction number for each label.
3830 */
3831 struct label *labels;
3832 unsigned labels_size;
3833 unsigned labels_count;
3834
3835 /* Keep a record of the tgsi instruction number that each mesa
3836 * instruction starts at; this will be used to fix up labels after
3837 * translation.
3838 */
3839 unsigned *insn;
3840 unsigned insn_size;
3841 unsigned insn_count;
3842
3843 unsigned procType; /**< TGSI_PROCESSOR_VERTEX/FRAGMENT */
3844
3845 boolean error;
3846 };
3847
3848 /** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
3849 static unsigned mesa_sysval_to_semantic[SYSTEM_VALUE_MAX] = {
3850 TGSI_SEMANTIC_FACE,
3851 TGSI_SEMANTIC_INSTANCEID
3852 };
3853
3854 /**
3855 * Make note of a branch to a label in the TGSI code.
3856 * After we've emitted all instructions, we'll go over the list
3857 * of labels built here and patch the TGSI code with the actual
3858 * location of each label.
3859 */
3860 static unsigned *get_label(struct st_translate *t, unsigned branch_target)
3861 {
3862 unsigned i;
3863
3864 if (t->labels_count + 1 >= t->labels_size) {
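/* Double the allocation (with labels_size == 0 this yields an initial
 * size of 2, assuming util_logbase2(0) == 0). */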
3865 t->labels_size = 1 << (util_logbase2(t->labels_size) + 1);
3866 t->labels = (struct label *)realloc(t->labels,
3867 t->labels_size * sizeof(struct label));
3868 if (t->labels == NULL) {
3869 static unsigned dummy;
3870 t->error = TRUE;
3871 return &dummy;
3872 }
3873 }
3874
3875 i = t->labels_count++;
3876 t->labels[i].branch_target = branch_target;
3877 return &t->labels[i].token;
3878 }
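/* Usage sketch, for reference; it mirrors compile_tgsi_instruction() and the
 * fixup loop in st_translate_program() below.  The returned token slot is
 * handed to ureg_label_insn() when the branching instruction is emitted:
 *
 *    ureg_label_insn(ureg, inst->op, src, num_src, get_label(t, 0));
 *
 * and once all instructions are emitted, each recorded label is resolved
 * against the insn[] table:
 *
 *    ureg_fixup_label(ureg, t->labels[i].token,
 *                     t->insn[t->labels[i].branch_target]);
 */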
3879
3880 /**
3881 * Called prior to emitting the TGSI code for each instruction.
3882 * Allocate additional space for instructions if needed.
3883 * Update the insn[] array so the next glsl_to_tgsi_instruction points to
3884 * the next TGSI instruction.
3885 */
3886 static void set_insn_start(struct st_translate *t, unsigned start)
3887 {
3888 if (t->insn_count + 1 >= t->insn_size) {
3889 t->insn_size = 1 << (util_logbase2(t->insn_size) + 1);
3890 t->insn = (unsigned *)realloc(t->insn, t->insn_size * sizeof(t->insn[0]));
3891 if (t->insn == NULL) {
3892 t->error = TRUE;
3893 return;
3894 }
3895 }
3896
3897 t->insn[t->insn_count++] = start;
3898 }
3899
3900 /**
3901 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate.
3902 */
3903 static struct ureg_src
3904 emit_immediate(struct st_translate *t,
3905 gl_constant_value values[4],
3906 int type, int size)
3907 {
3908 struct ureg_program *ureg = t->ureg;
3909
3910 switch(type)
3911 {
3912 case GL_FLOAT:
3913 return ureg_DECL_immediate(ureg, &values[0].f, size);
3914 case GL_INT:
3915 return ureg_DECL_immediate_int(ureg, &values[0].i, size);
3916 case GL_UNSIGNED_INT:
3917 case GL_BOOL:
3918 return ureg_DECL_immediate_uint(ureg, &values[0].u, size);
3919 default:
3920 assert(!"should not get here - type must be float, int, uint, or bool");
3921 return ureg_src_undef();
3922 }
3923 }
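/* emit_immediate() is used below both for the visitor's own immediates
 * (program->immediates) and for PROGRAM_CONSTANT parameters when the
 * constant buffer is not addressed indirectly. */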
3924
3925 /**
3926 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register.
3927 */
3928 static struct ureg_dst
3929 dst_register(struct st_translate *t,
3930 gl_register_file file,
3931 GLuint index)
3932 {
3933 switch(file) {
3934 case PROGRAM_UNDEFINED:
3935 return ureg_dst_undef();
3936
3937 case PROGRAM_TEMPORARY:
3938 if (ureg_dst_is_undef(t->temps[index]))
3939 t->temps[index] = ureg_DECL_temporary(t->ureg);
3940
3941 return t->temps[index];
3942
3943 case PROGRAM_OUTPUT:
3944 if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ)
3945 t->prevInstWrotePointSize = GL_TRUE;
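/* The clamp of this value against the point size limits is emitted later,
 * in st_translate_program(), right after the instruction that wrote it
 * (see the prevInstWrotePointSize handling there). */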
3946
3947 if (t->procType == TGSI_PROCESSOR_VERTEX)
3948 assert(index < VERT_RESULT_MAX);
3949 else if (t->procType == TGSI_PROCESSOR_FRAGMENT)
3950 assert(index < FRAG_RESULT_MAX);
3951 else
3952 assert(index < GEOM_RESULT_MAX);
3953
3954 assert(t->outputMapping[index] < Elements(t->outputs));
3955
3956 return t->outputs[t->outputMapping[index]];
3957
3958 case PROGRAM_ADDRESS:
3959 return t->address[index];
3960
3961 default:
3962 assert(!"unknown dst register file");
3963 return ureg_dst_undef();
3964 }
3965 }
3966
3967 /**
3968 * Map a glsl_to_tgsi src register to a TGSI ureg_src register.
3969 */
3970 static struct ureg_src
3971 src_register(struct st_translate *t,
3972 gl_register_file file,
3973 GLuint index)
3974 {
3975 switch(file) {
3976 case PROGRAM_UNDEFINED:
3977 return ureg_src_undef();
3978
3979 case PROGRAM_TEMPORARY:
3980 assert(index >= 0);
3981 assert(index < Elements(t->temps));
3982 if (ureg_dst_is_undef(t->temps[index]))
3983 t->temps[index] = ureg_DECL_temporary(t->ureg);
3984 return ureg_src(t->temps[index]);
3985
3986 case PROGRAM_NAMED_PARAM:
3987 case PROGRAM_ENV_PARAM:
3988 case PROGRAM_LOCAL_PARAM:
3989 case PROGRAM_UNIFORM:
3990 assert(index >= 0);
3991 return t->constants[index];
3992 case PROGRAM_STATE_VAR:
3993 case PROGRAM_CONSTANT: /* i.e., immediate */
3994 if (index < 0)
3995 return ureg_DECL_constant(t->ureg, 0);
3996 else
3997 return t->constants[index];
3998
3999 case PROGRAM_IMMEDIATE:
4000 return t->immediates[index];
4001
4002 case PROGRAM_INPUT:
4003 assert(t->inputMapping[index] < Elements(t->inputs));
4004 return t->inputs[t->inputMapping[index]];
4005
4006 case PROGRAM_OUTPUT:
4007 assert(t->outputMapping[index] < Elements(t->outputs));
4008 return ureg_src(t->outputs[t->outputMapping[index]]); /* not needed? */
4009
4010 case PROGRAM_ADDRESS:
4011 return ureg_src(t->address[index]);
4012
4013 case PROGRAM_SYSTEM_VALUE:
4014 assert(index < Elements(t->systemValues));
4015 return t->systemValues[index];
4016
4017 default:
4018 assert(!"unknown src register file");
4019 return ureg_src_undef();
4020 }
4021 }
4022
4023 /**
4024 * Create a TGSI ureg_dst register from an st_dst_reg.
4025 */
4026 static struct ureg_dst
4027 translate_dst(struct st_translate *t,
4028 const st_dst_reg *dst_reg,
4029 bool saturate)
4030 {
4031 struct ureg_dst dst = dst_register(t,
4032 dst_reg->file,
4033 dst_reg->index);
4034
4035 dst = ureg_writemask(dst, dst_reg->writemask);
4036
4037 if (saturate)
4038 dst = ureg_saturate(dst);
4039
4040 if (dst_reg->reladdr != NULL)
4041 dst = ureg_dst_indirect(dst, ureg_src(t->address[0]));
4042
4043 return dst;
4044 }
4045
4046 /**
4047 * Create a TGSI ureg_src register from an st_src_reg.
4048 */
4049 static struct ureg_src
4050 translate_src(struct st_translate *t, const st_src_reg *src_reg)
4051 {
4052 struct ureg_src src = src_register(t, src_reg->file, src_reg->index);
4053
4054 src = ureg_swizzle(src,
4055 GET_SWZ(src_reg->swizzle, 0) & 0x3,
4056 GET_SWZ(src_reg->swizzle, 1) & 0x3,
4057 GET_SWZ(src_reg->swizzle, 2) & 0x3,
4058 GET_SWZ(src_reg->swizzle, 3) & 0x3);
4059
4060 if ((src_reg->negate & 0xf) == NEGATE_XYZW)
4061 src = ureg_negate(src);
4062
4063 if (src_reg->reladdr != NULL) {
4064 /* Normally ureg_src_indirect() would be used here, but a stupid compiler
4065 * bug in g++ makes ureg_src_indirect (an inline C function) erroneously
4066 * set the bit for src.Negate. So we have to do the operation manually
4067 * here to work around the compiler's problems. */
4068 /*src = ureg_src_indirect(src, ureg_src(t->address[0]));*/
4069 struct ureg_src addr = ureg_src(t->address[0]);
4070 src.Indirect = 1;
4071 src.IndirectFile = addr.File;
4072 src.IndirectIndex = addr.Index;
4073 src.IndirectSwizzle = addr.SwizzleX;
4074
4075 if (src_reg->file != PROGRAM_INPUT &&
4076 src_reg->file != PROGRAM_OUTPUT) {
4077 /* If src_reg->index was negative, it was set to zero in
4078 * src_register(). Reassign it now. But don't do this
4079 * for input/output regs since they get remapped while
4080 * const buffers don't.
4081 */
4082 src.Index = src_reg->index;
4083 }
4084 }
4085
4086 return src;
4087 }
4088
4089 static void
4090 compile_tgsi_instruction(struct st_translate *t,
4091 const struct glsl_to_tgsi_instruction *inst)
4092 {
4093 struct ureg_program *ureg = t->ureg;
4094 GLuint i;
4095 struct ureg_dst dst[1];
4096 struct ureg_src src[4];
4097 unsigned num_dst;
4098 unsigned num_src;
4099
4100 num_dst = num_inst_dst_regs(inst->op);
4101 num_src = num_inst_src_regs(inst->op);
4102
4103 if (num_dst)
4104 dst[0] = translate_dst(t,
4105 &inst->dst,
4106 inst->saturate);
4107
4108 for (i = 0; i < num_src; i++)
4109 src[i] = translate_src(t, &inst->src[i]);
4110
4111 switch(inst->op) {
4112 case TGSI_OPCODE_BGNLOOP:
4113 case TGSI_OPCODE_CAL:
4114 case TGSI_OPCODE_ELSE:
4115 case TGSI_OPCODE_ENDLOOP:
4116 case TGSI_OPCODE_IF:
4117 assert(num_dst == 0);
4118 ureg_label_insn(ureg,
4119 inst->op,
4120 src, num_src,
4121 get_label(t,
4122 inst->op == TGSI_OPCODE_CAL ? inst->function->sig_id : 0));
4123 return;
4124
4125 case TGSI_OPCODE_TEX:
4126 case TGSI_OPCODE_TXB:
4127 case TGSI_OPCODE_TXD:
4128 case TGSI_OPCODE_TXL:
4129 case TGSI_OPCODE_TXP:
4130 src[num_src++] = t->samplers[inst->sampler];
4131 ureg_tex_insn(ureg,
4132 inst->op,
4133 dst, num_dst,
4134 translate_texture_target(inst->tex_target, inst->tex_shadow),
4135 src, num_src);
4136 return;
4137
4138 case TGSI_OPCODE_SCS:
4139 dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XY);
4140 ureg_insn(ureg, inst->op, dst, num_dst, src, num_src);
4141 break;
4142
4143 default:
4144 ureg_insn(ureg,
4145 inst->op,
4146 dst, num_dst,
4147 src, num_src);
4148 break;
4149 }
4150 }
4151
4152 /**
4153 * Emit the TGSI instructions to adjust the WPOS pixel center convention.
4154 * Basically, add (adjX, adjY) to the fragment position.
4155 */
4156 static void
4157 emit_adjusted_wpos(struct st_translate *t,
4158 const struct gl_program *program,
4159 float adjX, float adjY)
4160 {
4161 struct ureg_program *ureg = t->ureg;
4162 struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
4163 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
4164
4165 /* Note that we bias X and Y and pass Z and W through unchanged.
4166 * The shader might also use gl_FragCoord.w and .z.
4167 */
4168 ureg_ADD(ureg, wpos_temp, wpos_input,
4169 ureg_imm4f(ureg, adjX, adjY, 0.0f, 0.0f));
4170
4171 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
4172 }
4173
4174
4175 /**
4176 * Emit the TGSI instructions for inverting the WPOS y coordinate.
4177 * This code is unavoidable because the inversion also depends on whether
4178 * an FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
4179 */
4180 static void
4181 emit_wpos_inversion(struct st_translate *t,
4182 const struct gl_program *program,
4183 bool invert)
4184 {
4185 struct ureg_program *ureg = t->ureg;
4186
4187 /* Fragment program uses fragment position input.
4188 * Need to replace instances of INPUT[WPOS] with temp T
4189 * where T = INPUT[WPOS] but with y inverted.
4190 */
4191 static const gl_state_index wposTransformState[STATE_LENGTH]
4192 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM,
4193 (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
4194
4195 /* XXX: note we are modifying the incoming shader here! Need to
4196 * do this before emitting the constant decls below, or this
4197 * will be missed:
4198 */
4199 unsigned wposTransConst = _mesa_add_state_reference(program->Parameters,
4200 wposTransformState);
4201
4202 struct ureg_src wpostrans = ureg_DECL_constant(ureg, wposTransConst);
4203 struct ureg_dst wpos_temp;
4204 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];
4205
4206 /* MOV wpos_temp, input[wpos]
4207 */
4208 if (wpos_input.File == TGSI_FILE_TEMPORARY)
4209 wpos_temp = ureg_dst(wpos_input);
4210 else {
4211 wpos_temp = ureg_DECL_temporary(ureg);
4212 ureg_MOV(ureg, wpos_temp, wpos_input);
4213 }
4214
4215 if (invert) {
4216 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
4217 */
4218 ureg_MAD(ureg,
4219 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
4220 wpos_input,
4221 ureg_scalar(wpostrans, 0),
4222 ureg_scalar(wpostrans, 1));
4223 } else {
4224 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
4225 */
4226 ureg_MAD(ureg,
4227 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
4228 wpos_input,
4229 ureg_scalar(wpostrans, 2),
4230 ureg_scalar(wpostrans, 3));
4231 }
4232
4233 /* Use wpos_temp as position input from here on:
4234 */
4235 t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
4236 }
4237
4238
4239 /**
4240 * Emit fragment position/coordinate code.
4241 */
4242 static void
4243 emit_wpos(struct st_context *st,
4244 struct st_translate *t,
4245 const struct gl_program *program,
4246 struct ureg_program *ureg)
4247 {
4248 const struct gl_fragment_program *fp =
4249 (const struct gl_fragment_program *) program;
4250 struct pipe_screen *pscreen = st->pipe->screen;
4251 boolean invert = FALSE;
4252
4253 if (fp->OriginUpperLeft) {
4254 /* Fragment shader wants origin in upper-left */
4255 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
4256 /* the driver supports upper-left origin */
4257 }
4258 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
4259 /* the driver supports lower-left origin, need to invert Y */
4260 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
4261 invert = TRUE;
4262 }
4263 else
4264 assert(0);
4265 }
4266 else {
4267 /* Fragment shader wants origin in lower-left */
4268 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
4269 /* the driver supports lower-left origin */
4270 ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
4271 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
4272 /* the driver supports upper-left origin, need to invert Y */
4273 invert = TRUE;
4274 else
4275 assert(0);
4276 }
4277
4278 if (fp->PixelCenterInteger) {
4279 /* Fragment shader wants pixel center integer */
4280 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER))
4281 /* the driver supports pixel center integer */
4282 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
4283 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER))
4284 /* the driver supports pixel center half integer, need to bias X,Y */
4285 emit_adjusted_wpos(t, program, 0.5f, invert ? 0.5f : -0.5f);
4286 else
4287 assert(0);
4288 }
4289 else {
4290 /* Fragment shader wants pixel center half integer */
4291 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
4292 /* the driver supports pixel center half integer */
4293 }
4294 else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
4295 /* the driver supports pixel center integer, need to bias X,Y */
4296 ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
4297 emit_adjusted_wpos(t, program, 0.5f, invert ? -0.5f : 0.5f);
4298 }
4299 else
4300 assert(0);
4301 }
4302
4303 /* we invert after adjustment so that we avoid the MOV to temporary,
4304 * and reuse the adjustment ADD instead */
4305 emit_wpos_inversion(t, program, invert);
4306 }
4307
4308 /**
4309 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
4310 * TGSI uses +1 for front, -1 for back.
4311 * This function converts the TGSI value to the GL value. Simply clamping/
4312 * saturating the value to [0,1] does the job.
4313 */
4314 static void
4315 emit_face_var(struct st_translate *t)
4316 {
4317 struct ureg_program *ureg = t->ureg;
4318 struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
4319 struct ureg_src face_input = t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]];
4320
4321 /* MOV_SAT face_temp, input[face] */
4322 face_temp = ureg_saturate(face_temp);
4323 ureg_MOV(ureg, face_temp, face_input);
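/* E.g. +1 (front-facing) saturates to 1.0 and -1 (back-facing) saturates
 * to 0.0, matching the GL convention described above. */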
4324
4325 /* Use face_temp as face input from here on: */
4326 t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]] = ureg_src(face_temp);
4327 }
4328
4329 static void
4330 emit_edgeflags(struct st_translate *t)
4331 {
4332 struct ureg_program *ureg = t->ureg;
4333 struct ureg_dst edge_dst = t->outputs[t->outputMapping[VERT_RESULT_EDGE]];
4334 struct ureg_src edge_src = t->inputs[t->inputMapping[VERT_ATTRIB_EDGEFLAG]];
4335
4336 ureg_MOV(ureg, edge_dst, edge_src);
4337 }
4338
4339 /**
4340 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
4341 * \param program the program to translate
4342 * \param numInputs number of input registers used
4343 * \param inputMapping maps Mesa fragment program inputs to TGSI generic
4344 * input indexes
4345 * \param inputSemanticName the TGSI_SEMANTIC flag for each input
4346 * \param inputSemanticIndex the semantic index (ex: which texcoord) for
4347 * each input
4348 * \param interpMode the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
4349 * \param numOutputs number of output registers used
4350 * \param outputMapping maps Mesa fragment program outputs to TGSI
4351 * generic outputs
4352 * \param outputSemanticName the TGSI_SEMANTIC flag for each output
4353 * \param outputSemanticIndex the semantic index (ex: which texcoord) for
4354 * each output
4355 *
4356 * \return PIPE_OK, PIPE_ERROR_OUT_OF_MEMORY or PIPE_ERROR_BAD_INPUT
4357 */
4358 extern "C" enum pipe_error
4359 st_translate_program(
4360 struct gl_context *ctx,
4361 uint procType,
4362 struct ureg_program *ureg,
4363 glsl_to_tgsi_visitor *program,
4364 const struct gl_program *proginfo,
4365 GLuint numInputs,
4366 const GLuint inputMapping[],
4367 const ubyte inputSemanticName[],
4368 const ubyte inputSemanticIndex[],
4369 const GLuint interpMode[],
4370 GLuint numOutputs,
4371 const GLuint outputMapping[],
4372 const ubyte outputSemanticName[],
4373 const ubyte outputSemanticIndex[],
4374 boolean passthrough_edgeflags)
4375 {
4376 struct st_translate translate, *t;
4377 unsigned i;
4378 enum pipe_error ret = PIPE_OK;
4379
4380 assert(numInputs <= Elements(t->inputs));
4381 assert(numOutputs <= Elements(t->outputs));
4382
4383 t = &translate;
4384 memset(t, 0, sizeof *t);
4385
4386 t->procType = procType;
4387 t->inputMapping = inputMapping;
4388 t->outputMapping = outputMapping;
4389 t->ureg = ureg;
4390 t->pointSizeOutIndex = -1;
4391 t->prevInstWrotePointSize = GL_FALSE;
4392
4393 /*
4394 * Declare input attributes.
4395 */
4396 if (procType == TGSI_PROCESSOR_FRAGMENT) {
4397 for (i = 0; i < numInputs; i++) {
4398 t->inputs[i] = ureg_DECL_fs_input(ureg,
4399 inputSemanticName[i],
4400 inputSemanticIndex[i],
4401 interpMode[i]);
4402 }
4403
4404 if (proginfo->InputsRead & FRAG_BIT_WPOS) {
4405 /* Must do this after setting up t->inputs, and before
4406 * emitting constant references, below:
4407 */
4408 emit_wpos(st_context(ctx), t, proginfo, ureg);
4409 }
4410
4411 if (proginfo->InputsRead & FRAG_BIT_FACE)
4412 emit_face_var(t);
4413
4414 /*
4415 * Declare output attributes.
4416 */
4417 for (i = 0; i < numOutputs; i++) {
4418 switch (outputSemanticName[i]) {
4419 case TGSI_SEMANTIC_POSITION:
4420 t->outputs[i] = ureg_DECL_output(ureg,
4421 TGSI_SEMANTIC_POSITION, /* Z/Depth */
4422 outputSemanticIndex[i]);
4423 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
4424 break;
4425 case TGSI_SEMANTIC_STENCIL:
4426 t->outputs[i] = ureg_DECL_output(ureg,
4427 TGSI_SEMANTIC_STENCIL, /* Stencil */
4428 outputSemanticIndex[i]);
4429 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
4430 break;
4431 case TGSI_SEMANTIC_COLOR:
4432 t->outputs[i] = ureg_DECL_output(ureg,
4433 TGSI_SEMANTIC_COLOR,
4434 outputSemanticIndex[i]);
4435 break;
4436 default:
4437 assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR");
4438 return PIPE_ERROR_BAD_INPUT;
4439 }
4440 }
4441 }
4442 else if (procType == TGSI_PROCESSOR_GEOMETRY) {
4443 for (i = 0; i < numInputs; i++) {
4444 t->inputs[i] = ureg_DECL_gs_input(ureg,
4445 i,
4446 inputSemanticName[i],
4447 inputSemanticIndex[i]);
4448 }
4449
4450 for (i = 0; i < numOutputs; i++) {
4451 t->outputs[i] = ureg_DECL_output(ureg,
4452 outputSemanticName[i],
4453 outputSemanticIndex[i]);
4454 }
4455 }
4456 else {
4457 assert(procType == TGSI_PROCESSOR_VERTEX);
4458
4459 for (i = 0; i < numInputs; i++) {
4460 t->inputs[i] = ureg_DECL_vs_input(ureg, i);
4461 }
4462
4463 for (i = 0; i < numOutputs; i++) {
4464 t->outputs[i] = ureg_DECL_output(ureg,
4465 outputSemanticName[i],
4466 outputSemanticIndex[i]);
4467 if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
4468 /* Writing to the point size result register requires special
4469 * handling to implement clamping.
4470 */
4471 static const gl_state_index pointSizeClampState[STATE_LENGTH]
4472 = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
4473 /* XXX: note we are modifying the incoming shader here! Need to
4474 * do this before emitting the constant decls below, or this
4475 * will be missed.
4476 */
4477 unsigned pointSizeClampConst =
4478 _mesa_add_state_reference(proginfo->Parameters,
4479 pointSizeClampState);
4480 struct ureg_dst psizregtemp = ureg_DECL_temporary(ureg);
4481 t->pointSizeConst = ureg_DECL_constant(ureg, pointSizeClampConst);
4482 t->pointSizeResult = t->outputs[i];
4483 t->pointSizeOutIndex = i;
4484 t->outputs[i] = psizregtemp;
4485 }
4486 }
4487 if (passthrough_edgeflags)
4488 emit_edgeflags(t);
4489 }
4490
4491 /* Declare address register.
4492 */
4493 if (program->num_address_regs > 0) {
4494 assert(program->num_address_regs == 1);
4495 t->address[0] = ureg_DECL_address(ureg);
4496 }
4497
4498 /* Declare misc input registers
4499 */
4500 {
4501 GLbitfield sysInputs = proginfo->SystemValuesRead;
4502 unsigned numSys = 0;
4503 for (i = 0; sysInputs; i++) {
4504 if (sysInputs & (1 << i)) {
4505 unsigned semName = mesa_sysval_to_semantic[i];
4506 t->systemValues[i] = ureg_DECL_system_value(ureg, numSys, semName, 0);
4507 numSys++;
4508 sysInputs &= ~(1 << i);
4509 }
4510 }
4511 }
4512
4513 if (program->indirect_addr_temps) {
4514 /* If temps are accessed with indirect addressing, declare temporaries
4515 * in sequential order. Otherwise, they are declared on demand elsewhere.
4516 * (Note: the number of temporaries is equal to program->next_temp)
4517 */
4518 for (i = 0; i < (unsigned)program->next_temp; i++) {
4519 /* XXX use TGSI_FILE_TEMPORARY_ARRAY when it's supported by ureg */
4520 t->temps[i] = ureg_DECL_temporary(t->ureg);
4521 }
4522 }
4523
4524 /* Emit constants and uniforms. TGSI uses a single index space for these,
4525 * so we put all the translated regs in t->constants.
4526 */
4527 if (proginfo->Parameters) {
4528 t->constants = (struct ureg_src *)CALLOC(proginfo->Parameters->NumParameters * sizeof(t->constants[0]));
4529 if (t->constants == NULL) {
4530 ret = PIPE_ERROR_OUT_OF_MEMORY;
4531 goto out;
4532 }
4533
4534 for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
4535 switch (proginfo->Parameters->Parameters[i].Type) {
4536 case PROGRAM_ENV_PARAM:
4537 case PROGRAM_LOCAL_PARAM:
4538 case PROGRAM_STATE_VAR:
4539 case PROGRAM_NAMED_PARAM:
4540 case PROGRAM_UNIFORM:
4541 t->constants[i] = ureg_DECL_constant(ureg, i);
4542 break;
4543
4544 /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
4545 * addressing of the const buffer.
4546 * FIXME: Be smarter and recognize param arrays:
4547 * indirect addressing is only valid within the referenced
4548 * array.
4549 */
4550 case PROGRAM_CONSTANT:
4551 if (program->indirect_addr_consts)
4552 t->constants[i] = ureg_DECL_constant(ureg, i);
4553 else
4554 t->constants[i] = emit_immediate(t,
4555 proginfo->Parameters->ParameterValues[i],
4556 proginfo->Parameters->Parameters[i].DataType,
4557 4);
4558 break;
4559 default:
4560 break;
4561 }
4562 }
4563 }
4564
4565 /* Emit immediate values.
4566 */
4567 t->immediates = (struct ureg_src *)CALLOC(program->num_immediates * sizeof(struct ureg_src));
4568 if (t->immediates == NULL) {
4569 ret = PIPE_ERROR_OUT_OF_MEMORY;
4570 goto out;
4571 }
4572 i = 0;
4573 foreach_iter(exec_list_iterator, iter, program->immediates) {
4574 immediate_storage *imm = (immediate_storage *)iter.get();
4575 t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size);
4576 }
4577
4578 /* texture samplers */
4579 for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
4580 if (program->samplers_used & (1 << i)) {
4581 t->samplers[i] = ureg_DECL_sampler(ureg, i);
4582 }
4583 }
4584
4585 /* Emit each instruction in turn:
4586 */
4587 foreach_iter(exec_list_iterator, iter, program->instructions) {
4588 set_insn_start(t, ureg_get_instruction_number(ureg));
4589 compile_tgsi_instruction(t, (glsl_to_tgsi_instruction *)iter.get());
4590
4591 if (t->prevInstWrotePointSize && proginfo->Id) {
4592 /* The previous instruction wrote to the (fake) vertex point size
4593 * result register. Now we need to clamp that value to the min/max
4594 * point size range, putting the result into the real point size
4595 * register.
4596 * Note that we can't do this easily at the end of the program due to a
4597 * possible early return.
4598 */
4599 set_insn_start(t, ureg_get_instruction_number(ureg));
4600 ureg_MAX(t->ureg,
4601 ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
4602 ureg_src(t->outputs[t->pointSizeOutIndex]),
4603 ureg_swizzle(t->pointSizeConst, 1,1,1,1));
4604 ureg_MIN(t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
4605 ureg_src(t->outputs[t->pointSizeOutIndex]),
4606 ureg_swizzle(t->pointSizeConst, 2,2,2,2));
4607 }
4608 t->prevInstWrotePointSize = GL_FALSE;
4609 }
4610
4611 /* Fix up all emitted labels:
4612 */
4613 for (i = 0; i < t->labels_count; i++) {
4614 ureg_fixup_label(ureg, t->labels[i].token,
4615 t->insn[t->labels[i].branch_target]);
4616 }
4617
4618 out:
4619 FREE(t->insn);
4620 FREE(t->labels);
4621 FREE(t->constants);
4622 FREE(t->immediates);
4623
4624 if (t->error) {
4625 debug_printf("%s: translate error flag set\n", __FUNCTION__);
4626 }
4627
4628 return ret;
4629 }
4630 /* ----------------------------- End TGSI code ------------------------------ */
4631
4632 /**
4633 * Convert a shader's GLSL IR into a Mesa gl_program, but without
4634 * generating Mesa IR.
4635 */
4636 static struct gl_program *
4637 get_mesa_program(struct gl_context *ctx,
4638 struct gl_shader_program *shader_program,
4639 struct gl_shader *shader)
4640 {
4641 glsl_to_tgsi_visitor* v = new glsl_to_tgsi_visitor();
4642 struct gl_program *prog;
4643 GLenum target;
4644 const char *target_string;
4645 bool progress;
4646 struct gl_shader_compiler_options *options =
4647 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)];
4648
4649 switch (shader->Type) {
4650 case GL_VERTEX_SHADER:
4651 target = GL_VERTEX_PROGRAM_ARB;
4652 target_string = "vertex";
4653 break;
4654 case GL_FRAGMENT_SHADER:
4655 target = GL_FRAGMENT_PROGRAM_ARB;
4656 target_string = "fragment";
4657 break;
4658 case GL_GEOMETRY_SHADER:
4659 target = GL_GEOMETRY_PROGRAM_NV;
4660 target_string = "geometry";
4661 break;
4662 default:
4663 assert(!"should not be reached");
4664 return NULL;
4665 }
4666
4667 validate_ir_tree(shader->ir);
4668
4669 prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name);
4670 if (!prog)
4671 return NULL;
4672 prog->Parameters = _mesa_new_parameter_list();
4673 prog->Varying = _mesa_new_parameter_list();
4674 prog->Attributes = _mesa_new_parameter_list();
4675 v->ctx = ctx;
4676 v->prog = prog;
4677 v->shader_program = shader_program;
4678 v->options = options;
4679 v->glsl_version = ctx->Const.GLSLVersion;
4680 v->native_integers = ctx->Const.NativeIntegers;
4681
4682 add_uniforms_to_parameters_list(shader_program, shader, prog);
4683
4684 /* Emit intermediate IR for main(). */
4685 visit_exec_list(shader->ir, v);
4686
4687 /* Now emit bodies for any functions that were used. */
4688 do {
4689 progress = GL_FALSE;
4690
4691 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
4692 function_entry *entry = (function_entry *)iter.get();
4693
4694 if (!entry->bgn_inst) {
4695 v->current_function = entry;
4696
4697 entry->bgn_inst = v->emit(NULL, TGSI_OPCODE_BGNSUB);
4698 entry->bgn_inst->function = entry;
4699
4700 visit_exec_list(&entry->sig->body, v);
4701
4702 glsl_to_tgsi_instruction *last;
4703 last = (glsl_to_tgsi_instruction *)v->instructions.get_tail();
4704 if (last->op != TGSI_OPCODE_RET)
4705 v->emit(NULL, TGSI_OPCODE_RET);
4706
4707 glsl_to_tgsi_instruction *end;
4708 end = v->emit(NULL, TGSI_OPCODE_ENDSUB);
4709 end->function = entry;
4710
4711 progress = GL_TRUE;
4712 }
4713 }
4714 } while (progress);
4715
4716 #if 0
4717 /* Print out some information (for debugging purposes) used by the
4718 * optimization passes. */
4719 for (i=0; i < v->next_temp; i++) {
4720 int fr = v->get_first_temp_read(i);
4721 int fw = v->get_first_temp_write(i);
4722 int lr = v->get_last_temp_read(i);
4723 int lw = v->get_last_temp_write(i);
4724
4725 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, fr, fw, lr, lw);
4726 assert(fw <= fr);
4727 }
4728 #endif
4729
4730 /* Remove reads from output registers, and from varyings in vertex shaders. */
4731 v->remove_output_reads(PROGRAM_OUTPUT);
4732 if (target == GL_VERTEX_PROGRAM_ARB)
4733 v->remove_output_reads(PROGRAM_VARYING);
4734
4735 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
4736 v->simplify_cmp();
4737 v->copy_propagate();
4738 while (v->eliminate_dead_code_advanced());
4739
4740 /* FIXME: These passes to optimize temporary registers don't work when there
4741 * is indirect addressing of the temporary register space. We need proper
4742 * array support so that we don't have to give up these passes in every
4743 * shader that uses arrays.
4744 */
4745 if (!v->indirect_addr_temps) {
4746 v->eliminate_dead_code();
4747 v->merge_registers();
4748 v->renumber_registers();
4749 }
4750
4751 /* Write the END instruction. */
4752 v->emit(NULL, TGSI_OPCODE_END);
4753
4754 if (ctx->Shader.Flags & GLSL_DUMP) {
4755 printf("\n");
4756 printf("GLSL IR for linked %s program %d:\n", target_string,
4757 shader_program->Name);
4758 _mesa_print_ir(shader->ir, NULL);
4759 printf("\n");
4760 printf("\n");
4761 }
4762
4763 prog->Instructions = NULL;
4764 prog->NumInstructions = 0;
4765
4766 do_set_program_inouts(shader->ir, prog);
4767 count_resources(v, prog);
4768
4769 check_resources(ctx, shader_program, v, prog);
4770
4771 _mesa_reference_program(ctx, &shader->Program, prog);
4772
4773 struct st_vertex_program *stvp;
4774 struct st_fragment_program *stfp;
4775 struct st_geometry_program *stgp;
4776
4777 switch (shader->Type) {
4778 case GL_VERTEX_SHADER:
4779 stvp = (struct st_vertex_program *)prog;
4780 stvp->glsl_to_tgsi = v;
4781 break;
4782 case GL_FRAGMENT_SHADER:
4783 stfp = (struct st_fragment_program *)prog;
4784 stfp->glsl_to_tgsi = v;
4785 break;
4786 case GL_GEOMETRY_SHADER:
4787 stgp = (struct st_geometry_program *)prog;
4788 stgp->glsl_to_tgsi = v;
4789 break;
4790 default:
4791 assert(!"should not be reached");
4792 return NULL;
4793 }
4794
4795 return prog;
4796 }
4797
4798 extern "C" {
4799
4800 struct gl_shader *
4801 st_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
4802 {
4803 struct gl_shader *shader;
4804 assert(type == GL_FRAGMENT_SHADER || type == GL_VERTEX_SHADER ||
4805 type == GL_GEOMETRY_SHADER_ARB);
4806 shader = rzalloc(NULL, struct gl_shader);
4807 if (shader) {
4808 shader->Type = type;
4809 shader->Name = name;
4810 _mesa_init_shader(ctx, shader);
4811 }
4812 return shader;
4813 }
4814
4815 struct gl_shader_program *
4816 st_new_shader_program(struct gl_context *ctx, GLuint name)
4817 {
4818 struct gl_shader_program *shProg;
4819 shProg = rzalloc(NULL, struct gl_shader_program);
4820 if (shProg) {
4821 shProg->Name = name;
4822 _mesa_init_shader_program(ctx, shProg);
4823 }
4824 return shProg;
4825 }
4826
4827 /**
4828 * Link a shader.
4829 * Called via ctx->Driver.LinkShader().
4830 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
4831 * with code lowering and other optimizations.
4832 */
4833 GLboolean
4834 st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4835 {
4836 assert(prog->LinkStatus);
4837
4838 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4839 if (prog->_LinkedShaders[i] == NULL)
4840 continue;
4841
4842 bool progress;
4843 exec_list *ir = prog->_LinkedShaders[i]->ir;
4844 const struct gl_shader_compiler_options *options =
4845 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)];
4846
4847 do {
4848 progress = false;
4849
4850 /* Lowering */
4851 do_mat_op_to_vec(ir);
4852 lower_instructions(ir, (MOD_TO_FRACT | DIV_TO_MUL_RCP | EXP_TO_EXP2
4853 | LOG_TO_LOG2
4854 | ((options->EmitNoPow) ? POW_TO_EXP2 : 0)));
4855
4856 progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress;
4857
4858 progress = do_common_optimization(ir, true, options->MaxUnrollIterations) || progress;
4859
4860 progress = lower_quadop_vector(ir, false) || progress;
4861
4862 if (options->EmitNoIfs) {
4863 progress = lower_discard(ir) || progress;
4864 progress = lower_if_to_cond_assign(ir) || progress;
4865 }
4866
4867 if (options->EmitNoNoise)
4868 progress = lower_noise(ir) || progress;
4869
4870 /* If there are forms of indirect addressing that the driver
4871 * cannot handle, perform the lowering pass.
4872 */
4873 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput
4874 || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform)
4875 progress =
4876 lower_variable_index_to_cond_assign(ir,
4877 options->EmitNoIndirectInput,
4878 options->EmitNoIndirectOutput,
4879 options->EmitNoIndirectTemp,
4880 options->EmitNoIndirectUniform)
4881 || progress;
4882
4883 progress = do_vec_index_to_cond_assign(ir) || progress;
4884 } while (progress);
4885
4886 validate_ir_tree(ir);
4887 }
4888
4889 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
4890 struct gl_program *linked_prog;
4891
4892 if (prog->_LinkedShaders[i] == NULL)
4893 continue;
4894
4895 linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i]);
4896
4897 if (linked_prog) {
4898 bool ok = true;
4899
4900 switch (prog->_LinkedShaders[i]->Type) {
4901 case GL_VERTEX_SHADER:
4902 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
4903 (struct gl_vertex_program *)linked_prog);
4904 ok = ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
4905 linked_prog);
4906 break;
4907 case GL_FRAGMENT_SHADER:
4908 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
4909 (struct gl_fragment_program *)linked_prog);
4910 ok = ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
4911 linked_prog);
4912 break;
4913 case GL_GEOMETRY_SHADER:
4914 _mesa_reference_geomprog(ctx, &prog->GeometryProgram,
4915 (struct gl_geometry_program *)linked_prog);
4916 ok = ctx->Driver.ProgramStringNotify(ctx, GL_GEOMETRY_PROGRAM_NV,
4917 linked_prog);
4918 break;
4919 }
4920 if (!ok) {
4921 return GL_FALSE;
4922 }
4923 }
4924
4925 _mesa_reference_program(ctx, &linked_prog, NULL);
4926 }
4927
4928 return GL_TRUE;
4929 }
4930
4931
4932 /**
4933 * Link a GLSL shader program. Called via glLinkProgram().
4934 */
4935 void
4936 st_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
4937 {
4938 unsigned int i;
4939
4940 _mesa_clear_shader_program_data(ctx, prog);
4941
4942 prog->LinkStatus = GL_TRUE;
4943
4944 for (i = 0; i < prog->NumShaders; i++) {
4945 if (!prog->Shaders[i]->CompileStatus) {
4946 fail_link(prog, "linking with uncompiled shader");
4947 prog->LinkStatus = GL_FALSE;
4948 }
4949 }
4950
4951 prog->Varying = _mesa_new_parameter_list();
4952 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
4953 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
4954 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, NULL);
4955
4956 if (prog->LinkStatus) {
4957 link_shaders(ctx, prog);
4958 }
4959
4960 if (prog->LinkStatus) {
4961 if (!ctx->Driver.LinkShader(ctx, prog)) {
4962 prog->LinkStatus = GL_FALSE;
4963 }
4964 }
4965
4966 set_uniform_initializers(ctx, prog);
4967
4968 if (ctx->Shader.Flags & GLSL_DUMP) {
4969 if (!prog->LinkStatus) {
4970 printf("GLSL shader program %d failed to link\n", prog->Name);
4971 }
4972
4973 if (prog->InfoLog && prog->InfoLog[0] != 0) {
4974 printf("GLSL shader program %d info log:\n", prog->Name);
4975 printf("%s\n", prog->InfoLog);
4976 }
4977 }
4978 }
4979
4980 } /* extern "C" */