1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 /**
27 * \file ir_to_mesa.cpp
28 *
29 * Translates the GLSL IR to Mesa's gl_program representation
30 * (Mesa IR).
31 */
32
33 #include <stdio.h>
34 #include "ir.h"
35 #include "ir_visitor.h"
36 #include "ir_print_visitor.h"
37 #include "ir_expression_flattening.h"
38 #include "glsl_types.h"
39 #include "glsl_parser_extras.h"
40 #include "../glsl/program.h"
41 #include "ir_optimization.h"
42 #include "ast.h"
43
44 extern "C" {
45 #include "main/mtypes.h"
46 #include "shader/prog_instruction.h"
47 #include "shader/prog_optimize.h"
48 #include "shader/prog_print.h"
49 #include "shader/program.h"
50 #include "shader/prog_uniform.h"
51 #include "shader/prog_parameter.h"
52 #include "shader/shader_api.h"
53 }
54
55 /**
56 * This struct is a corresponding struct to Mesa prog_src_register, with
57 * wider fields.
58 */
59 typedef struct ir_to_mesa_src_reg {
60 int file; /**< PROGRAM_* from Mesa */
61 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
62 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
63 int negate; /**< NEGATE_XYZW mask from mesa */
64 /** Register index should be offset by the integer in this reg. */
65 ir_to_mesa_src_reg *reladdr;
66 } ir_to_mesa_src_reg;
67
68 typedef struct ir_to_mesa_dst_reg {
69 int file; /**< PROGRAM_* from Mesa */
70 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
71 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
72 GLuint cond_mask:4;
73 /** Register index should be offset by the integer in this reg. */
74 ir_to_mesa_src_reg *reladdr;
75 } ir_to_mesa_dst_reg;
76
77 extern ir_to_mesa_src_reg ir_to_mesa_undef;
78
79 class ir_to_mesa_instruction : public exec_node {
80 public:
81 enum prog_opcode op;
82 ir_to_mesa_dst_reg dst_reg;
83 ir_to_mesa_src_reg src_reg[3];
84 /** Pointer to the ir source this tree came from for debugging */
85 ir_instruction *ir;
86 GLboolean cond_update;
87 int sampler; /**< sampler index */
88 int tex_target; /**< One of TEXTURE_*_INDEX */
89 GLboolean tex_shadow;
90
91 class function_entry *function; /* Set on OPCODE_CAL or OPCODE_BGNSUB */
92 };
93
94 class variable_storage : public exec_node {
95 public:
96 variable_storage(ir_variable *var, int file, int index)
97 : file(file), index(index), var(var)
98 {
99 /* empty */
100 }
101
102 int file;
103 int index;
104 ir_variable *var; /* variable that maps to this, if any */
105 };
106
107 class function_entry : public exec_node {
108 public:
109 ir_function_signature *sig;
110
111 /**
112 * identifier of this function signature used by the program.
113 *
114 * At the point that Mesa instructions for function calls are
115 * generated, we don't know the address of the first instruction of
116 * the function body. So we make the BranchTarget that is called a
117 * small integer and rewrite them during set_branchtargets().
118 */
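/* For illustration (hypothetical numbers, not from the original code):
 * if this signature is assigned sig_id 2, the OPCODE_CAL emitted for a
 * call to it initially carries BranchTarget = 2, and set_branchtargets()
 * later rewrites that to the index of the first instruction of the
 * function body in the final prog_instruction array.
 */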
119 int sig_id;
120
121 /**
122 * Pointer to first instruction of the function body.
123 *
124 * Set during function body emits after main() is processed.
125 */
126 ir_to_mesa_instruction *bgn_inst;
127
128 /**
129 * Index of the first instruction of the function body in actual
130 * Mesa IR.
131 *
132 * Set after conversion from ir_to_mesa_instruction to prog_instruction.
133 */
134 int inst;
135
136 /** Storage for the return value. */
137 ir_to_mesa_src_reg return_reg;
138 };
139
140 class ir_to_mesa_visitor : public ir_visitor {
141 public:
142 ir_to_mesa_visitor();
143
144 function_entry *current_function;
145
146 GLcontext *ctx;
147 struct gl_program *prog;
148
149 int next_temp;
150
151 variable_storage *find_variable_storage(ir_variable *var);
152
153 function_entry *get_function_signature(ir_function_signature *sig);
154
155 ir_to_mesa_src_reg get_temp(const glsl_type *type);
156 void reladdr_to_temp(ir_instruction *ir,
157 ir_to_mesa_src_reg *reg, int *num_reladdr);
158
159 struct ir_to_mesa_src_reg src_reg_for_float(float val);
160
161 /**
162 * \name Visit methods
163 *
164 * As typical for the visitor pattern, there must be one \c visit method for
165 * each concrete subclass of \c ir_instruction. Virtual base classes within
166 * the hierarchy should not have \c visit methods.
167 */
168 /*@{*/
169 virtual void visit(ir_variable *);
170 virtual void visit(ir_loop *);
171 virtual void visit(ir_loop_jump *);
172 virtual void visit(ir_function_signature *);
173 virtual void visit(ir_function *);
174 virtual void visit(ir_expression *);
175 virtual void visit(ir_swizzle *);
176 virtual void visit(ir_dereference_variable *);
177 virtual void visit(ir_dereference_array *);
178 virtual void visit(ir_dereference_record *);
179 virtual void visit(ir_assignment *);
180 virtual void visit(ir_constant *);
181 virtual void visit(ir_call *);
182 virtual void visit(ir_return *);
183 virtual void visit(ir_discard *);
184 virtual void visit(ir_texture *);
185 virtual void visit(ir_if *);
186 /*@}*/
187
188 struct ir_to_mesa_src_reg result;
189
190 /** List of variable_storage */
191 exec_list variables;
192
193 /** List of function_entry */
194 exec_list function_signatures;
195 int next_signature_id;
196
197 /** List of ir_to_mesa_instruction */
198 exec_list instructions;
199
200 ir_to_mesa_instruction *ir_to_mesa_emit_op0(ir_instruction *ir,
201 enum prog_opcode op);
202
203 ir_to_mesa_instruction *ir_to_mesa_emit_op1(ir_instruction *ir,
204 enum prog_opcode op,
205 ir_to_mesa_dst_reg dst,
206 ir_to_mesa_src_reg src0);
207
208 ir_to_mesa_instruction *ir_to_mesa_emit_op2(ir_instruction *ir,
209 enum prog_opcode op,
210 ir_to_mesa_dst_reg dst,
211 ir_to_mesa_src_reg src0,
212 ir_to_mesa_src_reg src1);
213
214 ir_to_mesa_instruction *ir_to_mesa_emit_op3(ir_instruction *ir,
215 enum prog_opcode op,
216 ir_to_mesa_dst_reg dst,
217 ir_to_mesa_src_reg src0,
218 ir_to_mesa_src_reg src1,
219 ir_to_mesa_src_reg src2);
220
221 void ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
222 enum prog_opcode op,
223 ir_to_mesa_dst_reg dst,
224 ir_to_mesa_src_reg src0);
225
226 void ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
227 enum prog_opcode op,
228 ir_to_mesa_dst_reg dst,
229 ir_to_mesa_src_reg src0,
230 ir_to_mesa_src_reg src1);
231
232 GLboolean try_emit_mad(ir_expression *ir,
233 int mul_operand);
234
235 int *sampler_map;
236 int sampler_map_size;
237
238 void map_sampler(int location, int sampler);
239 int get_sampler_number(int location);
240
241 void *mem_ctx;
242 };
243
244 ir_to_mesa_src_reg ir_to_mesa_undef = {
245 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, NEGATE_NONE, NULL,
246 };
247
248 ir_to_mesa_dst_reg ir_to_mesa_undef_dst = {
249 PROGRAM_UNDEFINED, 0, SWIZZLE_NOOP, COND_TR, NULL,
250 };
251
252 ir_to_mesa_dst_reg ir_to_mesa_address_reg = {
253 PROGRAM_ADDRESS, 0, WRITEMASK_X, COND_TR, NULL
254 };
255
256 static int swizzle_for_size(int size)
257 {
258 int size_swizzles[4] = {
259 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
260 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
261 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
262 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
263 };
264
265 return size_swizzles[size - 1];
266 }
267
268 ir_to_mesa_instruction *
269 ir_to_mesa_visitor::ir_to_mesa_emit_op3(ir_instruction *ir,
270 enum prog_opcode op,
271 ir_to_mesa_dst_reg dst,
272 ir_to_mesa_src_reg src0,
273 ir_to_mesa_src_reg src1,
274 ir_to_mesa_src_reg src2)
275 {
276 ir_to_mesa_instruction *inst = new(mem_ctx) ir_to_mesa_instruction();
277 int num_reladdr = 0;
278
279 /* If we have to do relative addressing, we want to load the ARL
280 * reg directly for one of the regs, and preload the other reladdr
281 * sources into temps.
282 */
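/* For illustration (hypothetical registers): if both src0 and src1 are
 * relative -- say "dst = src0[i] + src1[j]" -- the emitted sequence
 * looks roughly like
 *
 *    ARL ADDR.x, j.x
 *    MOV TEMP[t], src1[ADDR.x]
 *    ARL ADDR.x, i.x
 *    ADD dst, src0[ADDR.x], TEMP[t]
 *
 * since there is only one address register to go around.
 */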
283 num_reladdr += dst.reladdr != NULL;
284 num_reladdr += src0.reladdr != NULL;
285 num_reladdr += src1.reladdr != NULL;
286 num_reladdr += src2.reladdr != NULL;
287
288 reladdr_to_temp(ir, &src2, &num_reladdr);
289 reladdr_to_temp(ir, &src1, &num_reladdr);
290 reladdr_to_temp(ir, &src0, &num_reladdr);
291
292 if (dst.reladdr) {
293 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg,
294 *dst.reladdr);
295
296 num_reladdr--;
297 }
298 assert(num_reladdr == 0);
299
300 inst->op = op;
301 inst->dst_reg = dst;
302 inst->src_reg[0] = src0;
303 inst->src_reg[1] = src1;
304 inst->src_reg[2] = src2;
305 inst->ir = ir;
306
307 inst->function = NULL;
308
309 this->instructions.push_tail(inst);
310
311 return inst;
312 }
313
314
315 ir_to_mesa_instruction *
316 ir_to_mesa_visitor::ir_to_mesa_emit_op2(ir_instruction *ir,
317 enum prog_opcode op,
318 ir_to_mesa_dst_reg dst,
319 ir_to_mesa_src_reg src0,
320 ir_to_mesa_src_reg src1)
321 {
322 return ir_to_mesa_emit_op3(ir, op, dst, src0, src1, ir_to_mesa_undef);
323 }
324
325 ir_to_mesa_instruction *
326 ir_to_mesa_visitor::ir_to_mesa_emit_op1(ir_instruction *ir,
327 enum prog_opcode op,
328 ir_to_mesa_dst_reg dst,
329 ir_to_mesa_src_reg src0)
330 {
331 return ir_to_mesa_emit_op3(ir, op, dst,
332 src0, ir_to_mesa_undef, ir_to_mesa_undef);
333 }
334
335 ir_to_mesa_instruction *
336 ir_to_mesa_visitor::ir_to_mesa_emit_op0(ir_instruction *ir,
337 enum prog_opcode op)
338 {
339 return ir_to_mesa_emit_op3(ir, op, ir_to_mesa_undef_dst,
340 ir_to_mesa_undef,
341 ir_to_mesa_undef,
342 ir_to_mesa_undef);
343 }
344
345 void
346 ir_to_mesa_visitor::map_sampler(int location, int sampler)
347 {
348 if (this->sampler_map_size <= location) {
349 this->sampler_map = talloc_realloc(this->mem_ctx, this->sampler_map,
350 int, location + 1);
351 this->sampler_map_size = location + 1;
352 }
353
354 this->sampler_map[location] = sampler;
355 }
356
357 int
358 ir_to_mesa_visitor::get_sampler_number(int location)
359 {
360 assert(location < this->sampler_map_size);
361 return this->sampler_map[location];
362 }
363
364 inline ir_to_mesa_dst_reg
365 ir_to_mesa_dst_reg_from_src(ir_to_mesa_src_reg reg)
366 {
367 ir_to_mesa_dst_reg dst_reg;
368
369 dst_reg.file = reg.file;
370 dst_reg.index = reg.index;
371 dst_reg.writemask = WRITEMASK_XYZW;
372 dst_reg.cond_mask = COND_TR;
373 dst_reg.reladdr = reg.reladdr;
374
375 return dst_reg;
376 }
377
378 inline ir_to_mesa_src_reg
379 ir_to_mesa_src_reg_from_dst(ir_to_mesa_dst_reg reg)
380 {
381 ir_to_mesa_src_reg src_reg;
382
383 src_reg.file = reg.file;
384 src_reg.index = reg.index;
385 src_reg.swizzle = SWIZZLE_XYZW;
386 src_reg.negate = 0;
387 src_reg.reladdr = reg.reladdr;
388
389 return src_reg;
390 }
391
392 /**
393 * Emits Mesa scalar opcodes to produce unique answers across channels.
394 *
395 * Some Mesa opcodes are scalar-only, as in ARB_fp/vp: the source X
396 * channel alone determines the result, replicated to all channels.
397 * So to apply such an operation to a vec4, we emit one scalar
398 * instruction per distinct source channel used by the dest channels.
399 */
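/* For illustration (hypothetical operands): an RCP of a vec4 whose
 * source swizzle is .xyzz needs only three scalar instructions, since
 * the repeated .z component can feed both .z and .w of the result:
 *
 *    RCP dst.x,  src.xxxx
 *    RCP dst.y,  src.yyyy
 *    RCP dst.zw, src.zzzz
 */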
400 void
401 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op2(ir_instruction *ir,
402 enum prog_opcode op,
403 ir_to_mesa_dst_reg dst,
404 ir_to_mesa_src_reg orig_src0,
405 ir_to_mesa_src_reg orig_src1)
406 {
407 int i, j;
408 int done_mask = ~dst.writemask;
409
410 /* Mesa RCP is a scalar operation splatting results to all channels,
411 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
412 * dst channels.
413 */
414 for (i = 0; i < 4; i++) {
415 GLuint this_mask = (1 << i);
416 ir_to_mesa_instruction *inst;
417 ir_to_mesa_src_reg src0 = orig_src0;
418 ir_to_mesa_src_reg src1 = orig_src1;
419
420 if (done_mask & this_mask)
421 continue;
422
423 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
424 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
425 for (j = i + 1; j < 4; j++) {
426 if (!(done_mask & (1 << j)) &&
427 GET_SWZ(src0.swizzle, j) == src0_swiz &&
428 GET_SWZ(src1.swizzle, j) == src1_swiz) {
429 this_mask |= (1 << j);
430 }
431 }
432 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
433 src0_swiz, src0_swiz);
434 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
435 src1_swiz, src1_swiz);
436
437 inst = ir_to_mesa_emit_op2(ir, op,
438 dst,
439 src0,
440 src1);
441 inst->dst_reg.writemask = this_mask;
442 done_mask |= this_mask;
443 }
444 }
445
446 void
447 ir_to_mesa_visitor::ir_to_mesa_emit_scalar_op1(ir_instruction *ir,
448 enum prog_opcode op,
449 ir_to_mesa_dst_reg dst,
450 ir_to_mesa_src_reg src0)
451 {
452 ir_to_mesa_src_reg undef = ir_to_mesa_undef;
453
454 undef.swizzle = SWIZZLE_XXXX;
455
456 ir_to_mesa_emit_scalar_op2(ir, op, dst, src0, undef);
457 }
458
459 struct ir_to_mesa_src_reg
460 ir_to_mesa_visitor::src_reg_for_float(float val)
461 {
462 ir_to_mesa_src_reg src_reg;
463
464 src_reg.file = PROGRAM_CONSTANT;
465 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
466 &val, 1, &src_reg.swizzle);
467 src_reg.reladdr = NULL;
468 src_reg.negate = 0;
469
470 return src_reg;
471 }
472
473 static int
474 type_size(const struct glsl_type *type)
475 {
476 unsigned int i;
477 int size;
478
479 switch (type->base_type) {
480 case GLSL_TYPE_UINT:
481 case GLSL_TYPE_INT:
482 case GLSL_TYPE_FLOAT:
483 case GLSL_TYPE_BOOL:
484 if (type->is_matrix()) {
485 return type->matrix_columns;
486 } else {
487 /* Regardless of the size of the vector, it gets a vec4. This is bad
488 * packing for things like floats, but otherwise arrays become a
489 * mess. Hopefully a later pass over the code can pack scalars
490 * down if appropriate.
491 */
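/* Illustrative sizes under this scheme (not exhaustive):
 *    float, vec2, vec3, vec4      -> 1 slot (one vec4 each)
 *    mat4                         -> 4 slots (one per column)
 *    float[10]                    -> 10 slots
 *    struct { vec3 a; float b; }  -> 2 slots
 */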
492 return 1;
493 }
494 case GLSL_TYPE_ARRAY:
495 return type_size(type->fields.array) * type->length;
496 case GLSL_TYPE_STRUCT:
497 size = 0;
498 for (i = 0; i < type->length; i++) {
499 size += type_size(type->fields.structure[i].type);
500 }
501 return size;
502 default:
503 assert(0);
return 0;
504 }
505 }
506
507 /**
508 * In the initial pass of codegen, we assign temporary numbers to
509 * intermediate results (not SSA -- variable assignments will reuse
510 * storage). Actual register allocation for the Mesa VM occurs in a
511 * pass over the Mesa IR later.
512 */
513 ir_to_mesa_src_reg
514 ir_to_mesa_visitor::get_temp(const glsl_type *type)
515 {
516 ir_to_mesa_src_reg src_reg;
517 int swizzle[4];
518 int i;
519
520 assert(!type->is_array());
521
522 src_reg.file = PROGRAM_TEMPORARY;
523 src_reg.index = next_temp;
524 src_reg.reladdr = NULL;
525 next_temp += type_size(type);
526
527 for (i = 0; i < type->vector_elements; i++)
528 swizzle[i] = i;
529 for (; i < 4; i++)
530 swizzle[i] = type->vector_elements - 1;
531 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
532 swizzle[2], swizzle[3]);
533 src_reg.negate = 0;
534
535 return src_reg;
536 }
537
538 variable_storage *
539 ir_to_mesa_visitor::find_variable_storage(ir_variable *var)
540 {
541
542 variable_storage *entry;
543
544 foreach_iter(exec_list_iterator, iter, this->variables) {
545 entry = (variable_storage *)iter.get();
546
547 if (entry->var == var)
548 return entry;
549 }
550
551 return NULL;
552 }
553
554 void
555 ir_to_mesa_visitor::visit(ir_variable *ir)
556 {
557 (void)ir;
558 }
559
560 void
561 ir_to_mesa_visitor::visit(ir_loop *ir)
562 {
563 assert(!ir->from);
564 assert(!ir->to);
565 assert(!ir->increment);
566 assert(!ir->counter);
567
568 ir_to_mesa_emit_op0(NULL, OPCODE_BGNLOOP);
569 visit_exec_list(&ir->body_instructions, this);
570 ir_to_mesa_emit_op0(NULL, OPCODE_ENDLOOP);
571 }
572
573 void
574 ir_to_mesa_visitor::visit(ir_loop_jump *ir)
575 {
576 switch (ir->mode) {
577 case ir_loop_jump::jump_break:
578 ir_to_mesa_emit_op0(NULL, OPCODE_BRK);
579 break;
580 case ir_loop_jump::jump_continue:
581 ir_to_mesa_emit_op0(NULL, OPCODE_CONT);
582 break;
583 }
584 }
585
586
587 void
588 ir_to_mesa_visitor::visit(ir_function_signature *ir)
589 {
590 assert(0);
591 (void)ir;
592 }
593
594 void
595 ir_to_mesa_visitor::visit(ir_function *ir)
596 {
597 /* Ignore function bodies other than main() -- we shouldn't see calls to
598 * them since they should all be inlined before we get to ir_to_mesa.
599 */
600 if (strcmp(ir->name, "main") == 0) {
601 const ir_function_signature *sig;
602 exec_list empty;
603
604 sig = ir->matching_signature(&empty);
605
606 assert(sig);
607
608 foreach_iter(exec_list_iterator, iter, sig->body) {
609 ir_instruction *ir = (ir_instruction *)iter.get();
610
611 ir->accept(this);
612 }
613 }
614 }
615
616 GLboolean
617 ir_to_mesa_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
618 {
619 int nonmul_operand = 1 - mul_operand;
620 ir_to_mesa_src_reg a, b, c;
621
622 ir_expression *expr = ir->operands[mul_operand]->as_expression();
623 if (!expr || expr->operation != ir_binop_mul)
624 return false;
625
626 expr->operands[0]->accept(this);
627 a = this->result;
628 expr->operands[1]->accept(this);
629 b = this->result;
630 ir->operands[nonmul_operand]->accept(this);
631 c = this->result;
632
633 this->result = get_temp(ir->type);
634 ir_to_mesa_emit_op3(ir, OPCODE_MAD,
635 ir_to_mesa_dst_reg_from_src(this->result), a, b, c);
636
637 return true;
638 }
639
640 void
641 ir_to_mesa_visitor::reladdr_to_temp(ir_instruction *ir,
642 ir_to_mesa_src_reg *reg, int *num_reladdr)
643 {
644 if (!reg->reladdr)
645 return;
646
647 ir_to_mesa_emit_op1(ir, OPCODE_ARL, ir_to_mesa_address_reg, *reg->reladdr);
648
649 if (*num_reladdr != 1) {
650 ir_to_mesa_src_reg temp = get_temp(glsl_type::vec4_type);
651
652 ir_to_mesa_emit_op1(ir, OPCODE_MOV,
653 ir_to_mesa_dst_reg_from_src(temp), *reg);
654 *reg = temp;
655 }
656
657 (*num_reladdr)--;
658 }
659
660 void
661 ir_to_mesa_visitor::visit(ir_expression *ir)
662 {
663 unsigned int operand;
664 struct ir_to_mesa_src_reg op[2];
665 struct ir_to_mesa_src_reg result_src;
666 struct ir_to_mesa_dst_reg result_dst;
667 const glsl_type *vec4_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 4, 1);
668 const glsl_type *vec3_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 3, 1);
669 const glsl_type *vec2_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 2, 1);
670
671 /* Quick peephole: Emit OPCODE_MAD(a, b, c) instead of ADD(MUL(a, b), c)
672 */
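/* For example, "d = a * b + c" becomes a single
 *    MAD d, a, b, c
 * rather than
 *    MUL t, a, b
 *    ADD d, t, c
 */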
673 if (ir->operation == ir_binop_add) {
674 if (try_emit_mad(ir, 1))
675 return;
676 if (try_emit_mad(ir, 0))
677 return;
678 }
679
680 for (operand = 0; operand < ir->get_num_operands(); operand++) {
681 this->result.file = PROGRAM_UNDEFINED;
682 ir->operands[operand]->accept(this);
683 if (this->result.file == PROGRAM_UNDEFINED) {
684 ir_print_visitor v;
685 printf("Failed to get tree for expression operand:\n");
686 ir->operands[operand]->accept(&v);
687 exit(1);
688 }
689 op[operand] = this->result;
690
691 /* Matrix expression operands should have been broken down to vector
692 * operations already.
693 */
694 assert(!ir->operands[operand]->type->is_matrix());
695 }
696
697 this->result.file = PROGRAM_UNDEFINED;
698
699 /* Storage for our result. Ideally for an assignment we'd be using
700 * the actual storage for the result here, instead.
701 */
702 result_src = get_temp(ir->type);
703 /* convenience for the emit functions below. */
704 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
705 /* Limit writes to the channels that will be used by result_src later.
706 * This does limit this temp's use as a temporary for multi-instruction
707 * sequences.
708 */
709 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
710
711 switch (ir->operation) {
712 case ir_unop_logic_not:
713 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst,
714 op[0], src_reg_for_float(0.0));
715 break;
716 case ir_unop_neg:
717 op[0].negate = ~op[0].negate;
718 result_src = op[0];
719 break;
720 case ir_unop_abs:
721 ir_to_mesa_emit_op1(ir, OPCODE_ABS, result_dst, op[0]);
722 break;
723 case ir_unop_sign:
724 ir_to_mesa_emit_op1(ir, OPCODE_SSG, result_dst, op[0]);
725 break;
726 case ir_unop_rcp:
727 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, op[0]);
728 break;
729
730 case ir_unop_exp:
731 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst,
732 src_reg_for_float(M_E), op[0]);
733 break;
734 case ir_unop_exp2:
735 ir_to_mesa_emit_scalar_op1(ir, OPCODE_EX2, result_dst, op[0]);
736 break;
737 case ir_unop_log:
738 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LOG, result_dst, op[0]);
739 break;
740 case ir_unop_log2:
741 ir_to_mesa_emit_scalar_op1(ir, OPCODE_LG2, result_dst, op[0]);
742 break;
743 case ir_unop_sin:
744 ir_to_mesa_emit_scalar_op1(ir, OPCODE_SIN, result_dst, op[0]);
745 break;
746 case ir_unop_cos:
747 ir_to_mesa_emit_scalar_op1(ir, OPCODE_COS, result_dst, op[0]);
748 break;
749
750 case ir_unop_dFdx:
751 ir_to_mesa_emit_op1(ir, OPCODE_DDX, result_dst, op[0]);
752 break;
753 case ir_unop_dFdy:
754 ir_to_mesa_emit_op1(ir, OPCODE_DDY, result_dst, op[0]);
755 break;
756
757 case ir_binop_add:
758 ir_to_mesa_emit_op2(ir, OPCODE_ADD, result_dst, op[0], op[1]);
759 break;
760 case ir_binop_sub:
761 ir_to_mesa_emit_op2(ir, OPCODE_SUB, result_dst, op[0], op[1]);
762 break;
763
764 case ir_binop_mul:
765 ir_to_mesa_emit_op2(ir, OPCODE_MUL, result_dst, op[0], op[1]);
766 break;
767 case ir_binop_div:
768 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
769 case ir_binop_mod:
770 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
771 break;
772
773 case ir_binop_less:
774 ir_to_mesa_emit_op2(ir, OPCODE_SLT, result_dst, op[0], op[1]);
775 break;
776 case ir_binop_greater:
777 ir_to_mesa_emit_op2(ir, OPCODE_SGT, result_dst, op[0], op[1]);
778 break;
779 case ir_binop_lequal:
780 ir_to_mesa_emit_op2(ir, OPCODE_SLE, result_dst, op[0], op[1]);
781 break;
782 case ir_binop_gequal:
783 ir_to_mesa_emit_op2(ir, OPCODE_SGE, result_dst, op[0], op[1]);
784 break;
785 case ir_binop_equal:
786 ir_to_mesa_emit_op2(ir, OPCODE_SEQ, result_dst, op[0], op[1]);
787 break;
788 case ir_binop_logic_xor:
789 case ir_binop_nequal:
790 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst, op[0], op[1]);
791 break;
792
793 case ir_binop_logic_or:
794 /* This could be a saturated add and skip the SNE. */
795 ir_to_mesa_emit_op2(ir, OPCODE_ADD,
796 result_dst,
797 op[0], op[1]);
798
799 ir_to_mesa_emit_op2(ir, OPCODE_SNE,
800 result_dst,
801 result_src, src_reg_for_float(0.0));
802 break;
803
804 case ir_binop_logic_and:
805 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
806 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
807 result_dst,
808 op[0], op[1]);
809 break;
810
811 case ir_binop_dot:
812 if (ir->operands[0]->type == vec4_type) {
813 assert(ir->operands[1]->type == vec4_type);
814 ir_to_mesa_emit_op2(ir, OPCODE_DP4,
815 result_dst,
816 op[0], op[1]);
817 } else if (ir->operands[0]->type == vec3_type) {
818 assert(ir->operands[1]->type == vec3_type);
819 ir_to_mesa_emit_op2(ir, OPCODE_DP3,
820 result_dst,
821 op[0], op[1]);
822 } else if (ir->operands[0]->type == vec2_type) {
823 assert(ir->operands[1]->type == vec2_type);
824 ir_to_mesa_emit_op2(ir, OPCODE_DP2,
825 result_dst,
826 op[0], op[1]);
827 }
828 break;
829
830 case ir_binop_cross:
831 ir_to_mesa_emit_op2(ir, OPCODE_XPD, result_dst, op[0], op[1]);
832 break;
833
834 case ir_unop_sqrt:
835 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
836 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RCP, result_dst, result_src);
837 /* For incoming channels < 0, set the result to 0. */
838 ir_to_mesa_emit_op3(ir, OPCODE_CMP, result_dst,
839 op[0], src_reg_for_float(0.0), result_src);
840 break;
841 case ir_unop_rsq:
842 ir_to_mesa_emit_scalar_op1(ir, OPCODE_RSQ, result_dst, op[0]);
843 break;
844 case ir_unop_i2f:
845 case ir_unop_b2f:
846 case ir_unop_b2i:
847 /* Mesa IR lacks types, ints are stored as truncated floats. */
848 result_src = op[0];
849 break;
850 case ir_unop_f2i:
851 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
852 break;
853 case ir_unop_f2b:
854 case ir_unop_i2b:
855 ir_to_mesa_emit_op2(ir, OPCODE_SNE, result_dst,
856 result_src, src_reg_for_float(0.0));
857 break;
858 case ir_unop_trunc:
859 ir_to_mesa_emit_op1(ir, OPCODE_TRUNC, result_dst, op[0]);
860 break;
861 case ir_unop_ceil:
862 op[0].negate = ~op[0].negate;
863 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
864 result_src.negate = ~result_src.negate;
865 break;
866 case ir_unop_floor:
867 ir_to_mesa_emit_op1(ir, OPCODE_FLR, result_dst, op[0]);
868 break;
869 case ir_unop_fract:
870 ir_to_mesa_emit_op1(ir, OPCODE_FRC, result_dst, op[0]);
871 break;
872
873 case ir_binop_min:
874 ir_to_mesa_emit_op2(ir, OPCODE_MIN, result_dst, op[0], op[1]);
875 break;
876 case ir_binop_max:
877 ir_to_mesa_emit_op2(ir, OPCODE_MAX, result_dst, op[0], op[1]);
878 break;
879 case ir_binop_pow:
880 ir_to_mesa_emit_scalar_op2(ir, OPCODE_POW, result_dst, op[0], op[1]);
881 break;
882
883 case ir_unop_bit_not:
884 case ir_unop_u2f:
885 case ir_binop_lshift:
886 case ir_binop_rshift:
887 case ir_binop_bit_and:
888 case ir_binop_bit_xor:
889 case ir_binop_bit_or:
890 assert(!"GLSL 1.30 features unsupported");
891 break;
892 }
893
894 this->result = result_src;
895 }
896
897
898 void
899 ir_to_mesa_visitor::visit(ir_swizzle *ir)
900 {
901 ir_to_mesa_src_reg src_reg;
902 int i;
903 int swizzle[4];
904
905 /* Note that this is only swizzles in expressions, not those on the left
906 * hand side of an assignment, which do write masking. See ir_assignment
907 * for that.
908 */
909
910 ir->val->accept(this);
911 src_reg = this->result;
912 assert(src_reg.file != PROGRAM_UNDEFINED);
913
914 for (i = 0; i < 4; i++) {
915 if (i < ir->type->vector_elements) {
916 switch (i) {
917 case 0:
918 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.x);
919 break;
920 case 1:
921 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.y);
922 break;
923 case 2:
924 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.z);
925 break;
926 case 3:
927 swizzle[i] = GET_SWZ(src_reg.swizzle, ir->mask.w);
928 break;
929 }
930 } else {
931 /* If the type is smaller than a vec4, replicate the last
932 * channel out.
933 */
934 swizzle[i] = swizzle[ir->type->vector_elements - 1];
935 }
936 }
937
938 src_reg.swizzle = MAKE_SWIZZLE4(swizzle[0],
939 swizzle[1],
940 swizzle[2],
941 swizzle[3]);
942
943 this->result = src_reg;
944 }
945
946 static int
947 add_matrix_ref(struct gl_program *prog, int *tokens)
948 {
949 int base_pos = -1;
950 int i;
951
952 /* Add a ref for each column. It looks like the reason we do
953 * it this way is that _mesa_add_state_reference doesn't work
954 * for things that aren't vec4s, so the tokens[2]/tokens[3]
955 * range has to be equal.
956 */
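/* For illustration: with tokens describing the transposed modelview
 * matrix, this creates four consecutive parameter entries,
 *
 *    { STATE_MODELVIEW_MATRIX, 0, 0, 0, STATE_MATRIX_TRANSPOSE }
 *    { STATE_MODELVIEW_MATRIX, 0, 1, 1, STATE_MATRIX_TRANSPOSE }
 *    { STATE_MODELVIEW_MATRIX, 0, 2, 2, STATE_MATRIX_TRANSPOSE }
 *    { STATE_MODELVIEW_MATRIX, 0, 3, 3, STATE_MATRIX_TRANSPOSE }
 *
 * and base_pos is the index of the first of them.
 */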
957 for (i = 0; i < 4; i++) {
958 tokens[2] = i;
959 tokens[3] = i;
960 int pos = _mesa_add_state_reference(prog->Parameters,
961 (gl_state_index *)tokens);
962 if (base_pos == -1)
963 base_pos = pos;
964 else
965 assert(base_pos + i == pos);
966 }
967
968 return base_pos;
969 }
970
971 static variable_storage *
972 get_builtin_matrix_ref(void *mem_ctx, struct gl_program *prog, ir_variable *var,
973 ir_rvalue *array_index)
974 {
975 /*
976 * NOTE: The ARB_vertex_program extension specified that matrices get
977 * loaded in registers in row-major order. With GLSL, we want column-
978 * major order. So, we need to transpose all matrices here...
979 */
980 static const struct {
981 const char *name;
982 int matrix;
983 int modifier;
984 } matrices[] = {
985 { "gl_ModelViewMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_TRANSPOSE },
986 { "gl_ModelViewMatrixInverse", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVTRANS },
987 { "gl_ModelViewMatrixTranspose", STATE_MODELVIEW_MATRIX, 0 },
988 { "gl_ModelViewMatrixInverseTranspose", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
989
990 { "gl_ProjectionMatrix", STATE_PROJECTION_MATRIX, STATE_MATRIX_TRANSPOSE },
991 { "gl_ProjectionMatrixInverse", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVTRANS },
992 { "gl_ProjectionMatrixTranspose", STATE_PROJECTION_MATRIX, 0 },
993 { "gl_ProjectionMatrixInverseTranspose", STATE_PROJECTION_MATRIX, STATE_MATRIX_INVERSE },
994
995 { "gl_ModelViewProjectionMatrix", STATE_MVP_MATRIX, STATE_MATRIX_TRANSPOSE },
996 { "gl_ModelViewProjectionMatrixInverse", STATE_MVP_MATRIX, STATE_MATRIX_INVTRANS },
997 { "gl_ModelViewProjectionMatrixTranspose", STATE_MVP_MATRIX, 0 },
998 { "gl_ModelViewProjectionMatrixInverseTranspose", STATE_MVP_MATRIX, STATE_MATRIX_INVERSE },
999
1000 { "gl_TextureMatrix", STATE_TEXTURE_MATRIX, STATE_MATRIX_TRANSPOSE },
1001 { "gl_TextureMatrixInverse", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVTRANS },
1002 { "gl_TextureMatrixTranspose", STATE_TEXTURE_MATRIX, 0 },
1003 { "gl_TextureMatrixInverseTranspose", STATE_TEXTURE_MATRIX, STATE_MATRIX_INVERSE },
1004
1005 { "gl_NormalMatrix", STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE },
1006
1007 };
1008 unsigned int i;
1009 variable_storage *entry;
1010
1011 /* C++ gets angry when we try to use an int[] where a gl_state_index[] is
1012 * expected, so we build the tokens as ints and cast. Make sure they're compatible.
1013 */
1014 assert(sizeof(gl_state_index) == sizeof(int));
1015
1016 for (i = 0; i < Elements(matrices); i++) {
1017 if (strcmp(var->name, matrices[i].name) == 0) {
1018 int tokens[STATE_LENGTH];
1019 int base_pos = -1;
1020
1021 tokens[0] = matrices[i].matrix;
1022 tokens[4] = matrices[i].modifier;
1023 if (matrices[i].matrix == STATE_TEXTURE_MATRIX) {
1024 ir_constant *index = array_index->constant_expression_value();
1025 if (index) {
1026 tokens[1] = index->value.i[0];
1027 base_pos = add_matrix_ref(prog, tokens);
1028 } else {
1029 for (i = 0; i < var->type->length; i++) {
1030 tokens[1] = i;
1031 int pos = add_matrix_ref(prog, tokens);
1032 if (base_pos == -1)
1033 base_pos = pos;
1034 else
1035 assert(base_pos + (int)i * 4 == pos);
1036 }
1037 }
1038 } else {
1039 tokens[1] = 0; /* unused array index */
1040 base_pos = add_matrix_ref(prog, tokens);
1041 }
1042 tokens[4] = matrices[i].modifier;
1043
1044 entry = new(mem_ctx) variable_storage(var,
1045 PROGRAM_STATE_VAR,
1046 base_pos);
1047
1048 return entry;
1049 }
1050 }
1051
1052 return NULL;
1053 }
1054
1055 void
1056 ir_to_mesa_visitor::visit(ir_dereference_variable *ir)
1057 {
1058 ir_to_mesa_src_reg src_reg;
1059 variable_storage *entry = find_variable_storage(ir->var);
1060 unsigned int loc;
1061
1062 if (!entry) {
1063 switch (ir->var->mode) {
1064 case ir_var_uniform:
1065 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, ir->var,
1066 NULL);
1067 if (entry)
1068 break;
1069
1070 /* FINISHME: Fix up uniform name for arrays and things */
1071 if (ir->var->type->base_type == GLSL_TYPE_SAMPLER) {
1072 /* FINISHME: we whack the location of the var here, which
1073 * is probably not expected. But we need to communicate
1074 * mesa's sampler number to the tex instruction.
1075 */
1076 int sampler = _mesa_add_sampler(this->prog->Parameters,
1077 ir->var->name,
1078 ir->var->type->gl_type);
1079 map_sampler(ir->var->location, sampler);
1080
1081 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_SAMPLER,
1082 sampler);
1083 this->variables.push_tail(entry);
1084 break;
1085 }
1086
1087 assert(ir->var->type->gl_type != 0 &&
1088 ir->var->type->gl_type != GL_INVALID_ENUM);
1089 loc = _mesa_add_uniform(this->prog->Parameters,
1090 ir->var->name,
1091 type_size(ir->var->type) * 4,
1092 ir->var->type->gl_type,
1093 NULL);
1094
1095 /* Always mark the uniform used at this point. If it isn't
1096 * used, dead code elimination should have nuked the decl already.
1097 */
1098 this->prog->Parameters->Parameters[loc].Used = GL_TRUE;
1099
1100 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_UNIFORM, loc);
1101 this->variables.push_tail(entry);
1102 break;
1103 case ir_var_in:
1104 case ir_var_out:
1105 case ir_var_inout:
1106 /* The linker assigns locations for varyings and attributes,
1107 * including deprecated builtins (like gl_Color), user-assigned
1108 * generic attributes (glBindAttribLocation), and
1109 * user-defined varyings.
1110 *
1111 * FINISHME: We would hit this path for function arguments. Fix!
1112 */
1113 assert(ir->var->location != -1);
1114 if (ir->var->mode == ir_var_in ||
1115 ir->var->mode == ir_var_inout) {
1116 entry = new(mem_ctx) variable_storage(ir->var,
1117 PROGRAM_INPUT,
1118 ir->var->location);
1119
1120 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1121 ir->var->location >= VERT_ATTRIB_GENERIC0) {
1122 _mesa_add_attribute(prog->Attributes,
1123 ir->var->name,
1124 type_size(ir->var->type) * 4,
1125 ir->var->type->gl_type,
1126 ir->var->location - VERT_ATTRIB_GENERIC0);
1127 }
1128 } else {
1129 entry = new(mem_ctx) variable_storage(ir->var,
1130 PROGRAM_OUTPUT,
1131 ir->var->location);
1132 }
1133
1134 break;
1135 case ir_var_auto:
1136 entry = new(mem_ctx) variable_storage(ir->var, PROGRAM_TEMPORARY,
1137 this->next_temp);
1138 this->variables.push_tail(entry);
1139
1140 next_temp += type_size(ir->var->type);
1141 break;
1142 }
1143
1144 if (!entry) {
1145 printf("Failed to make storage for %s\n", ir->var->name);
1146 exit(1);
1147 }
1148 }
1149
1150 src_reg.file = entry->file;
1151 src_reg.index = entry->index;
1152 /* If the type is smaller than a vec4, replicate the last channel out. */
1153 src_reg.swizzle = swizzle_for_size(ir->var->type->vector_elements);
1154 src_reg.reladdr = NULL;
1155 src_reg.negate = 0;
1156
1157 this->result = src_reg;
1158 }
1159
1160 void
1161 ir_to_mesa_visitor::visit(ir_dereference_array *ir)
1162 {
1163 ir_constant *index;
1164 ir_to_mesa_src_reg src_reg;
1165 ir_dereference_variable *deref_var = ir->array->as_dereference_variable();
1166 int element_size = type_size(ir->type);
1167
1168 index = ir->array_index->constant_expression_value();
1169
1170 if (deref_var && strncmp(deref_var->var->name,
1171 "gl_TextureMatrix",
1172 strlen("gl_TextureMatrix")) == 0) {
1173 ir_to_mesa_src_reg src_reg;
1174 struct variable_storage *entry;
1175
1176 entry = get_builtin_matrix_ref(this->mem_ctx, this->prog, deref_var->var,
1177 ir->array_index);
1178 assert(entry);
1179
1180 src_reg.file = entry->file;
1181 src_reg.index = entry->index;
1182 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1183 src_reg.negate = 0;
1184
1185 if (index) {
1186 src_reg.reladdr = NULL;
1187 } else {
1188 ir_to_mesa_src_reg index_reg = get_temp(glsl_type::float_type);
1189
1190 ir->array_index->accept(this);
1191 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1192 ir_to_mesa_dst_reg_from_src(index_reg),
1193 this->result, src_reg_for_float(element_size));
1194
1195 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1196 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1197 }
1198
1199 this->result = src_reg;
1200 return;
1201 }
1202
1203 ir->array->accept(this);
1204 src_reg = this->result;
1205
1206 if (index) {
1207 src_reg.index += index->value.i[0] * element_size;
1208 } else {
1209 ir_to_mesa_src_reg array_base = this->result;
1210 /* Variable index array dereference. It eats the "vec4" of the
1211 * base of the array and an index that offsets the Mesa register
1212 * index.
1213 */
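/* For illustration (hypothetical names): for "v = arr[i]" where each
 * element occupies element_size > 1 registers, the index is scaled,
 *
 *    MUL TEMP[t].x, i.x, element_size
 *
 * and stashed in src_reg.reladdr; the ARL and the indirect access are
 * emitted later, when this source register is actually consumed.
 */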
1214 ir->array_index->accept(this);
1215
1216 ir_to_mesa_src_reg index_reg;
1217
1218 if (element_size == 1) {
1219 index_reg = this->result;
1220 } else {
1221 index_reg = get_temp(glsl_type::float_type);
1222
1223 ir_to_mesa_emit_op2(ir, OPCODE_MUL,
1224 ir_to_mesa_dst_reg_from_src(index_reg),
1225 this->result, src_reg_for_float(element_size));
1226 }
1227
1228 src_reg.reladdr = talloc(mem_ctx, ir_to_mesa_src_reg);
1229 memcpy(src_reg.reladdr, &index_reg, sizeof(index_reg));
1230 }
1231
1232 /* If the type is smaller than a vec4, replicate the last channel out. */
1233 src_reg.swizzle = swizzle_for_size(ir->type->vector_elements);
1234
1235 this->result = src_reg;
1236 }
1237
1238 void
1239 ir_to_mesa_visitor::visit(ir_dereference_record *ir)
1240 {
1241 unsigned int i;
1242 const glsl_type *struct_type = ir->record->type;
1243 int offset = 0;
1244
1245 ir->record->accept(this);
1246
1247 for (i = 0; i < struct_type->length; i++) {
1248 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1249 break;
1250 offset += type_size(struct_type->fields.structure[i].type);
1251 }
1252 this->result.index += offset;
1253 }
1254
1255 /**
1256 * We want to be careful in assignment setup to hit the actual storage
1257 * instead of potentially using a temporary like we might with the
1258 * ir_dereference handler.
1259 *
1260 * Thanks to ir_swizzle_swizzle, and ir_vec_index_to_swizzle, we
1261 * should only see potentially one variable array index of a vector,
1262 * and one swizzle, before getting to actual vec4 storage. So handle
1263 * those, then go use ir_dereference to handle the rest.
1264 */
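/* For illustration (hypothetical variables): an assignment like
 * "v.zy = a.xy" turns the left-hand swizzle into a writemask and
 * remaps the right-hand swizzle to match, ending up as roughly
 *
 *    MOV v.yz, a.xyxx     (v.y <- a.y, v.z <- a.x)
 */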
1265 static struct ir_to_mesa_dst_reg
1266 get_assignment_lhs(ir_instruction *ir, ir_to_mesa_visitor *v,
1267 ir_to_mesa_src_reg *r)
1268 {
1269 struct ir_to_mesa_dst_reg dst_reg;
1270 ir_swizzle *swiz;
1271
1272 ir_dereference_array *deref_array = ir->as_dereference_array();
1273 /* This should have been handled by ir_vec_index_to_cond_assign */
1274 if (deref_array) {
1275 assert(!deref_array->array->type->is_vector());
1276 }
1277
1278 /* Use the rvalue deref handler for the most part. We'll ignore
1279 * swizzles in it and write swizzles using writemask, though.
1280 */
1281 ir->accept(v);
1282 dst_reg = ir_to_mesa_dst_reg_from_src(v->result);
1283
1284 if ((swiz = ir->as_swizzle())) {
1285 int swizzles[4] = {
1286 swiz->mask.x,
1287 swiz->mask.y,
1288 swiz->mask.z,
1289 swiz->mask.w
1290 };
1291 int new_r_swizzle[4];
1292 int orig_r_swizzle = r->swizzle;
1293 int i;
1294
1295 for (i = 0; i < 4; i++) {
1296 new_r_swizzle[i] = GET_SWZ(orig_r_swizzle, 0);
1297 }
1298
1299 dst_reg.writemask = 0;
1300 for (i = 0; i < 4; i++) {
1301 if (i < swiz->mask.num_components) {
1302 dst_reg.writemask |= 1 << swizzles[i];
1303 new_r_swizzle[swizzles[i]] = GET_SWZ(orig_r_swizzle, i);
1304 }
1305 }
1306
1307 r->swizzle = MAKE_SWIZZLE4(new_r_swizzle[0],
1308 new_r_swizzle[1],
1309 new_r_swizzle[2],
1310 new_r_swizzle[3]);
1311 }
1312
1313 return dst_reg;
1314 }
1315
1316 void
1317 ir_to_mesa_visitor::visit(ir_assignment *ir)
1318 {
1319 struct ir_to_mesa_dst_reg l;
1320 struct ir_to_mesa_src_reg r;
1321 int i;
1322
1323 assert(!ir->lhs->type->is_array());
1324 assert(ir->lhs->type->base_type != GLSL_TYPE_STRUCT);
1325
1326 ir->rhs->accept(this);
1327 r = this->result;
1328
1329 l = get_assignment_lhs(ir->lhs, this, &r);
1330
1331 assert(l.file != PROGRAM_UNDEFINED);
1332 assert(r.file != PROGRAM_UNDEFINED);
1333
1334 if (ir->condition) {
1335 ir_to_mesa_src_reg condition;
1336
1337 ir->condition->accept(this);
1338 condition = this->result;
1339
1340 /* We use the OPCODE_CMP (a < 0 ? b : c) for conditional moves,
1341 * and the condition we produced is 0.0 or 1.0. By flipping the
1342 * sign, we can choose which value OPCODE_CMP produces without
1343 * an extra instruction to compute the condition.
1344 */
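/* For illustration: OPCODE_CMP computes dst = (src0 < 0) ? src1 : src2.
 * With the condition in {0.0, 1.0} and its sign flipped to {-0.0, -1.0},
 * "l = cond ? r : l" becomes a single
 *
 *    CMP l, -cond, r, l
 */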
1345 condition.negate = ~condition.negate;
1346 for (i = 0; i < type_size(ir->lhs->type); i++) {
1347 ir_to_mesa_emit_op3(ir, OPCODE_CMP, l,
1348 condition, r, ir_to_mesa_src_reg_from_dst(l));
1349 l.index++;
1350 r.index++;
1351 }
1352 } else {
1353 for (i = 0; i < type_size(ir->lhs->type); i++) {
1354 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1355 l.index++;
1356 r.index++;
1357 }
1358 }
1359 }
1360
1361
1362 void
1363 ir_to_mesa_visitor::visit(ir_constant *ir)
1364 {
1365 ir_to_mesa_src_reg src_reg;
1366 GLfloat stack_vals[4];
1367 GLfloat *values = stack_vals;
1368 unsigned int i;
1369
1370 if (ir->type->is_array()) {
1371 ir->print();
1372 printf("\n");
1373 assert(!"FINISHME: array constants");
1374 }
1375
1376 if (ir->type->is_matrix()) {
1377 /* Unfortunately, 4 floats is all we can get into
1378 * _mesa_add_unnamed_constant. So, make a temp to store the
1379 * matrix and move each constant value into it. If we get
1380 * lucky, copy propagation will eliminate the extra moves.
1381 */
1382 ir_to_mesa_src_reg mat = get_temp(glsl_type::vec4_type);
1383 ir_to_mesa_dst_reg mat_column = ir_to_mesa_dst_reg_from_src(mat);
1384
1385 for (i = 0; i < ir->type->matrix_columns; i++) {
1386 src_reg.file = PROGRAM_CONSTANT;
1387
1388 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1389 values = &ir->value.f[i * ir->type->vector_elements];
1390
1391 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1392 values,
1393 ir->type->vector_elements,
1394 &src_reg.swizzle);
1395 src_reg.reladdr = NULL;
1396 src_reg.negate = 0;
1397 ir_to_mesa_emit_op1(ir, OPCODE_MOV, mat_column, src_reg);
1398
1399 mat_column.index++;
1400 }
1401
1402 this->result = mat;
return;
1403 }
1404
1405 src_reg.file = PROGRAM_CONSTANT;
1406 switch (ir->type->base_type) {
1407 case GLSL_TYPE_FLOAT:
1408 values = &ir->value.f[0];
1409 break;
1410 case GLSL_TYPE_UINT:
1411 for (i = 0; i < ir->type->vector_elements; i++) {
1412 values[i] = ir->value.u[i];
1413 }
1414 break;
1415 case GLSL_TYPE_INT:
1416 for (i = 0; i < ir->type->vector_elements; i++) {
1417 values[i] = ir->value.i[i];
1418 }
1419 break;
1420 case GLSL_TYPE_BOOL:
1421 for (i = 0; i < ir->type->vector_elements; i++) {
1422 values[i] = ir->value.b[i];
1423 }
1424 break;
1425 default:
1426 assert(!"Non-float/uint/int/bool constant");
1427 }
1428
1429 src_reg.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1430 values, ir->type->vector_elements,
1431 &src_reg.swizzle);
1432 src_reg.reladdr = NULL;
1433 src_reg.negate = 0;
1434
1435 this->result = src_reg;
1436 }
1437
1438 function_entry *
1439 ir_to_mesa_visitor::get_function_signature(ir_function_signature *sig)
1440 {
1441 function_entry *entry;
1442
1443 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
1444 entry = (function_entry *)iter.get();
1445
1446 if (entry->sig == sig)
1447 return entry;
1448 }
1449
1450 entry = talloc(mem_ctx, function_entry);
1451 entry->sig = sig;
1452 entry->sig_id = this->next_signature_id++;
1453 entry->bgn_inst = NULL;
1454
1455 /* Allocate storage for all the parameters. */
1456 foreach_iter(exec_list_iterator, iter, sig->parameters) {
1457 ir_variable *param = (ir_variable *)iter.get();
1458 variable_storage *storage;
1459
1460 storage = find_variable_storage(param);
1461 assert(!storage);
1462
1463 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
1464 this->next_temp);
1465 this->variables.push_tail(storage);
1466
1467 this->next_temp += type_size(param->type);
1469 }
1470
1471 if (sig->return_type) {
1472 entry->return_reg = get_temp(sig->return_type);
1473 } else {
1474 entry->return_reg = ir_to_mesa_undef;
1475 }
1476
1477 this->function_signatures.push_tail(entry);
1478 return entry;
1479 }
1480
1481 void
1482 ir_to_mesa_visitor::visit(ir_call *ir)
1483 {
1484 ir_to_mesa_instruction *call_inst;
1485 ir_function_signature *sig = ir->get_callee();
1486 function_entry *entry = get_function_signature(sig);
1487 int i;
1488
1489 /* Process in parameters. */
1490 exec_list_iterator sig_iter = sig->parameters.iterator();
1491 foreach_iter(exec_list_iterator, iter, *ir) {
1492 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1493 ir_variable *param = (ir_variable *)sig_iter.get();
1494
1495 if (param->mode == ir_var_in ||
1496 param->mode == ir_var_inout) {
1497 variable_storage *storage = find_variable_storage(param);
1498 assert(storage);
1499
1500 param_rval->accept(this);
1501 ir_to_mesa_src_reg r = this->result;
1502
1503 ir_to_mesa_dst_reg l;
1504 l.file = storage->file;
1505 l.index = storage->index;
1506 l.reladdr = NULL;
1507 l.writemask = WRITEMASK_XYZW;
1508 l.cond_mask = COND_TR;
1509
1510 for (i = 0; i < type_size(param->type); i++) {
1511 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1512 l.index++;
1513 r.index++;
1514 }
1515 }
1516
1517 sig_iter.next();
1518 }
1519 assert(!sig_iter.has_next());
1520
1521 /* Emit call instruction */
1522 call_inst = ir_to_mesa_emit_op1(ir, OPCODE_CAL,
1523 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1524 call_inst->function = entry;
1525
1526 /* Process out parameters. */
1527 sig_iter = sig->parameters.iterator();
1528 foreach_iter(exec_list_iterator, iter, *ir) {
1529 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1530 ir_variable *param = (ir_variable *)sig_iter.get();
1531
1532 if (param->mode == ir_var_out ||
1533 param->mode == ir_var_inout) {
1534 variable_storage *storage = find_variable_storage(param);
1535 assert(storage);
1536
1537 ir_to_mesa_src_reg r;
1538 r.file = storage->file;
1539 r.index = storage->index;
1540 r.reladdr = NULL;
1541 r.swizzle = SWIZZLE_NOOP;
1542 r.negate = 0;
1543
1544 param_rval->accept(this);
1545 ir_to_mesa_dst_reg l = ir_to_mesa_dst_reg_from_src(this->result);
1546
1547 for (i = 0; i < type_size(param->type); i++) {
1548 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1549 l.index++;
1550 r.index++;
1551 }
1552 }
1553
1554 sig_iter.next();
1555 }
1556 assert(!sig_iter.has_next());
1557
1558 /* Process return value. */
1559 this->result = entry->return_reg;
1560 }
1561
1562
1563 void
1564 ir_to_mesa_visitor::visit(ir_texture *ir)
1565 {
1566 ir_to_mesa_src_reg result_src, coord, lod_info, projector;
1567 ir_to_mesa_dst_reg result_dst, coord_dst;
1568 ir_to_mesa_instruction *inst = NULL;
1569 prog_opcode opcode = OPCODE_NOP;
1570
1571 ir->coordinate->accept(this);
1572
1573 /* Put our coords in a temp. We'll need to modify them for shadow,
1574 * projection, or LOD, so the only case where we'd use it unmodified
1575 * is plain old texturing. Mesa IR optimization should
1576 * handle cleaning up our mess in that case.
1577 */
1578 coord = get_temp(glsl_type::vec4_type);
1579 coord_dst = ir_to_mesa_dst_reg_from_src(coord);
1580 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst,
1581 this->result);
1582
1583 if (ir->projector) {
1584 ir->projector->accept(this);
1585 projector = this->result;
1586 }
1587
1588 /* Storage for our result. Ideally for an assignment we'd be using
1589 * the actual storage for the result here, instead.
1590 */
1591 result_src = get_temp(glsl_type::vec4_type);
1592 result_dst = ir_to_mesa_dst_reg_from_src(result_src);
1593
1594 switch (ir->op) {
1595 case ir_tex:
1596 opcode = OPCODE_TEX;
1597 break;
1598 case ir_txb:
1599 opcode = OPCODE_TXB;
1600 ir->lod_info.bias->accept(this);
1601 lod_info = this->result;
1602 break;
1603 case ir_txl:
1604 opcode = OPCODE_TXL;
1605 ir->lod_info.lod->accept(this);
1606 lod_info = this->result;
1607 break;
1608 case ir_txd:
1609 case ir_txf:
1610 assert(!"GLSL 1.30 features unsupported");
1611 break;
1612 }
1613
1614 if (ir->projector) {
1615 if (opcode == OPCODE_TEX) {
1616 /* Slot the projector in as the last component of the coord. */
1617 coord_dst.writemask = WRITEMASK_W;
1618 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, projector);
1619 coord_dst.writemask = WRITEMASK_XYZW;
1620 opcode = OPCODE_TXP;
1621 } else {
1622 ir_to_mesa_src_reg coord_w = coord;
1623 coord_w.swizzle = SWIZZLE_WWWW;
1624
1625 /* For the other TEX opcodes there's no projective version
1626 * since the last slot is taken up by lod info. Do the
1627 * projective divide now.
1628 */
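/* For illustration (hypothetical registers): a projective, biased
 * lookup ends up as something like
 *
 *    RCP coord.w,   proj.x
 *    MUL coord.xyz, coord, coord.wwww
 *    MOV coord.w,   bias.x
 *    TXB result, coord, texture[unit], 2D
 */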
1629 coord_dst.writemask = WRITEMASK_W;
1630 ir_to_mesa_emit_op1(ir, OPCODE_RCP, coord_dst, projector);
1631
1632 coord_dst.writemask = WRITEMASK_XYZ;
1633 ir_to_mesa_emit_op2(ir, OPCODE_MUL, coord_dst, coord, coord_w);
1634
1635 coord_dst.writemask = WRITEMASK_XYZW;
1636 coord.swizzle = SWIZZLE_XYZW;
1637 }
1638 }
1639
1640 if (ir->shadow_comparitor) {
1641 /* Slot the shadow value in as the second to last component of the
1642 * coord.
1643 */
1644 ir->shadow_comparitor->accept(this);
1645 coord_dst.writemask = WRITEMASK_Z;
1646 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, this->result);
1647 coord_dst.writemask = WRITEMASK_XYZW;
1648 }
1649
1650 if (opcode == OPCODE_TXL || opcode == OPCODE_TXB) {
1651 /* Mesa IR stores lod or lod bias in the last channel of the coords. */
1652 coord_dst.writemask = WRITEMASK_W;
1653 ir_to_mesa_emit_op1(ir, OPCODE_MOV, coord_dst, lod_info);
1654 coord_dst.writemask = WRITEMASK_XYZW;
1655 }
1656
1657 inst = ir_to_mesa_emit_op1(ir, opcode, result_dst, coord);
1658
1659 if (ir->shadow_comparitor)
1660 inst->tex_shadow = GL_TRUE;
1661
1662 ir_dereference_variable *sampler = ir->sampler->as_dereference_variable();
1663 assert(sampler); /* FINISHME: sampler arrays */
1664 /* generate the mapping, remove when we generate storage at
1665 * declaration time
1666 */
1667 sampler->accept(this);
1668
1669 inst->sampler = get_sampler_number(sampler->var->location);
1670
1671 switch (sampler->type->sampler_dimensionality) {
1672 case GLSL_SAMPLER_DIM_1D:
1673 inst->tex_target = TEXTURE_1D_INDEX;
1674 break;
1675 case GLSL_SAMPLER_DIM_2D:
1676 inst->tex_target = TEXTURE_2D_INDEX;
1677 break;
1678 case GLSL_SAMPLER_DIM_3D:
1679 inst->tex_target = TEXTURE_3D_INDEX;
1680 break;
1681 case GLSL_SAMPLER_DIM_CUBE:
1682 inst->tex_target = TEXTURE_CUBE_INDEX;
1683 break;
1684 default:
1685 assert(!"FINISHME: other texture targets");
1686 }
1687
1688 this->result = result_src;
1689 }
1690
1691 void
1692 ir_to_mesa_visitor::visit(ir_return *ir)
1693 {
1694 assert(current_function);
1695
1696 if (ir->get_value()) {
1697 ir_to_mesa_dst_reg l;
1698 int i;
1699
1700 ir->get_value()->accept(this);
1701 ir_to_mesa_src_reg r = this->result;
1702
1703 l = ir_to_mesa_dst_reg_from_src(current_function->return_reg);
1704
1705 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
1706 ir_to_mesa_emit_op1(ir, OPCODE_MOV, l, r);
1707 l.index++;
1708 r.index++;
1709 }
1710 }
1711
1712 ir_to_mesa_emit_op0(ir, OPCODE_RET);
1713 }
1714
1715 void
1716 ir_to_mesa_visitor::visit(ir_discard *ir)
1717 {
1718 assert(ir->condition == NULL); /* FINISHME */
1719
1720 ir_to_mesa_emit_op0(ir, OPCODE_KIL_NV);
1721 }
1722
1723 void
1724 ir_to_mesa_visitor::visit(ir_if *ir)
1725 {
1726 ir_to_mesa_instruction *cond_inst, *if_inst, *else_inst = NULL;
1727 ir_to_mesa_instruction *prev_inst;
1728
1729 prev_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1730
1731 ir->condition->accept(this);
1732 assert(this->result.file != PROGRAM_UNDEFINED);
1733
1734 if (ctx->Shader.EmitCondCodes) {
1735 cond_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
1736
1737 /* See if we actually generated any instruction for generating
1738 * the condition. If not, then cook up a move to a temp so we
1739 * have something to set cond_update on.
1740 */
1741 if (cond_inst == prev_inst) {
1742 ir_to_mesa_src_reg temp = get_temp(glsl_type::bool_type);
1743 cond_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_MOV,
1744 ir_to_mesa_dst_reg_from_src(temp),
1745 result);
1746 }
1747 cond_inst->cond_update = GL_TRUE;
1748
1749 if_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_IF);
1750 if_inst->dst_reg.cond_mask = COND_NE;
1751 } else {
1752 if_inst = ir_to_mesa_emit_op1(ir->condition,
1753 OPCODE_IF, ir_to_mesa_undef_dst,
1754 this->result);
1755 }
1756
1758
1759 visit_exec_list(&ir->then_instructions, this);
1760
1761 if (!ir->else_instructions.is_empty()) {
1762 else_inst = ir_to_mesa_emit_op0(ir->condition, OPCODE_ELSE);
1763 visit_exec_list(&ir->else_instructions, this);
1764 }
1765
1766 if_inst = ir_to_mesa_emit_op1(ir->condition, OPCODE_ENDIF,
1767 ir_to_mesa_undef_dst, ir_to_mesa_undef);
1768 }
1769
1770 ir_to_mesa_visitor::ir_to_mesa_visitor()
1771 {
1772 result.file = PROGRAM_UNDEFINED;
1773 next_temp = 1;
1774 next_signature_id = 1;
1775 sampler_map = NULL;
1776 sampler_map_size = 0;
1777 current_function = NULL;
1778 }
1779
1780 static struct prog_src_register
1781 mesa_src_reg_from_ir_src_reg(ir_to_mesa_src_reg reg)
1782 {
1783 struct prog_src_register mesa_reg;
1784
1785 mesa_reg.File = reg.file;
1786 assert(reg.index < (1 << INST_INDEX_BITS) - 1);
1787 mesa_reg.Index = reg.index;
1788 mesa_reg.Swizzle = reg.swizzle;
1789 mesa_reg.RelAddr = reg.reladdr != NULL;
1790 mesa_reg.Negate = reg.negate;
1791 mesa_reg.Abs = 0;
1792
1793 return mesa_reg;
1794 }
1795
1796 static void
1797 set_branchtargets(ir_to_mesa_visitor *v,
1798 struct prog_instruction *mesa_instructions,
1799 int num_instructions)
1800 {
1801 int if_count = 0, loop_count = 0;
1802 int *if_stack, *loop_stack;
1803 int if_stack_pos = 0, loop_stack_pos = 0;
1804 int i, j;
1805
1806 for (i = 0; i < num_instructions; i++) {
1807 switch (mesa_instructions[i].Opcode) {
1808 case OPCODE_IF:
1809 if_count++;
1810 break;
1811 case OPCODE_BGNLOOP:
1812 loop_count++;
1813 break;
1814 case OPCODE_BRK:
1815 case OPCODE_CONT:
1816 mesa_instructions[i].BranchTarget = -1;
1817 break;
1818 default:
1819 break;
1820 }
1821 }
1822
1823 if_stack = (int *)calloc(if_count, sizeof(*if_stack));
1824 loop_stack = (int *)calloc(loop_count, sizeof(*loop_stack));
1825
1826 for (i = 0; i < num_instructions; i++) {
1827 switch (mesa_instructions[i].Opcode) {
1828 case OPCODE_IF:
1829 if_stack[if_stack_pos] = i;
1830 if_stack_pos++;
1831 break;
1832 case OPCODE_ELSE:
1833 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1834 if_stack[if_stack_pos - 1] = i;
1835 break;
1836 case OPCODE_ENDIF:
1837 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
1838 if_stack_pos--;
1839 break;
1840 case OPCODE_BGNLOOP:
1841 loop_stack[loop_stack_pos] = i;
1842 loop_stack_pos++;
1843 break;
1844 case OPCODE_ENDLOOP:
1845 loop_stack_pos--;
1846 /* Rewrite any breaks/conts at this nesting level (that haven't
1847 * already had a BranchTarget assigned) to point to the end
1848 * of the loop.
1849 */
1850 for (j = loop_stack[loop_stack_pos]; j < i; j++) {
1851 if (mesa_instructions[j].Opcode == OPCODE_BRK ||
1852 mesa_instructions[j].Opcode == OPCODE_CONT) {
1853 if (mesa_instructions[j].BranchTarget == -1) {
1854 mesa_instructions[j].BranchTarget = i;
1855 }
1856 }
1857 }
1858 /* The loop's BGNLOOP and ENDLOOP point at each other. */
1859 mesa_instructions[i].BranchTarget = loop_stack[loop_stack_pos];
1860 mesa_instructions[loop_stack[loop_stack_pos]].BranchTarget = i;
1861 break;
1862 case OPCODE_CAL:
1863 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
1864 function_entry *entry = (function_entry *)iter.get();
1865
1866 if (entry->sig_id == mesa_instructions[i].BranchTarget) {
1867 mesa_instructions[i].BranchTarget = entry->inst;
1868 break;
1869 }
1870 }
1871 break;
1872 default:
1873 break;
1874 }
1875 }
1876
1877 free(if_stack);
free(loop_stack);
1878 }
1879
1880 static void
1881 print_program(struct prog_instruction *mesa_instructions,
1882 ir_instruction **mesa_instruction_annotation,
1883 int num_instructions)
1884 {
1885 ir_instruction *last_ir = NULL;
1886 int i;
1887
1888 for (i = 0; i < num_instructions; i++) {
1889 struct prog_instruction *mesa_inst = mesa_instructions + i;
1890 ir_instruction *ir = mesa_instruction_annotation[i];
1891
1892 if (last_ir != ir && ir) {
1893 ir_print_visitor print;
1894 ir->accept(&print);
1895 printf("\n");
1896 last_ir = ir;
1897 }
1898
1899 _mesa_print_instruction(mesa_inst);
1900 }
1901 }
1902
1903 static void
1904 mark_input(struct gl_program *prog,
1905 int index,
1906 GLboolean reladdr)
1907 {
1908 prog->InputsRead |= BITFIELD64_BIT(index);
1909 int i;
1910
1911 if (reladdr) {
1912 if (index >= FRAG_ATTRIB_TEX0 && index <= FRAG_ATTRIB_TEX7) {
1913 for (i = 0; i < 8; i++) {
1914 prog->InputsRead |= BITFIELD64_BIT(FRAG_ATTRIB_TEX0 + i);
1915 }
1916 } else {
1917 assert(!"FINISHME: Mark InputsRead for varying arrays");
1918 }
1919 }
1920 }
1921
1922 static void
1923 mark_output(struct gl_program *prog,
1924 int index,
1925 GLboolean reladdr)
1926 {
1927 prog->OutputsWritten |= BITFIELD64_BIT(index);
1928 int i;
1929
1930 if (reladdr) {
1931 if (index >= VERT_RESULT_TEX0 && index <= VERT_RESULT_TEX7) {
1932 for (i = 0; i < 8; i++) {
1933 prog->OutputsWritten |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
1934 }
1935 } else {
1936 assert(!"FINISHME: Mark OutputsWritten for varying arrays");
1937 }
1938 }
1939 }
1940
1941 static void
1942 count_resources(struct gl_program *prog)
1943 {
1944 unsigned int i;
1945
1946 prog->InputsRead = 0;
1947 prog->OutputsWritten = 0;
1948 prog->SamplersUsed = 0;
1949
1950 for (i = 0; i < prog->NumInstructions; i++) {
1951 struct prog_instruction *inst = &prog->Instructions[i];
1952 unsigned int reg;
1953
1954 switch (inst->DstReg.File) {
1955 case PROGRAM_OUTPUT:
1956 mark_output(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1957 break;
1958 case PROGRAM_INPUT:
1959 mark_input(prog, inst->DstReg.Index, inst->DstReg.RelAddr);
1960 break;
1961 default:
1962 break;
1963 }
1964
1965 for (reg = 0; reg < _mesa_num_inst_src_regs(inst->Opcode); reg++) {
1966 switch (inst->SrcReg[reg].File) {
1967 case PROGRAM_OUTPUT:
1968 mark_output(prog, inst->SrcReg[reg].Index,
1969 inst->SrcReg[reg].RelAddr);
1970 break;
1971 case PROGRAM_INPUT:
1972 mark_input(prog, inst->SrcReg[reg].Index, inst->SrcReg[reg].RelAddr);
1973 break;
1974 default:
1975 break;
1976 }
1977 }
1978
1979       /* Instead of mapping the uniform's value directly to a sampler,
1980        * Mesa first allocates a separate number for the sampler
1981        * (_mesa_add_sampler), then reindexes it down to a small integer
1982        * (sampler_map[], SamplersUsed), and that in turn gets mapped to
1983        * the uniform's value before we finally reach an actual sampler.
1984        */
1985 if (_mesa_is_tex_instruction(inst->Opcode)) {
1986 prog->SamplerTargets[inst->TexSrcUnit] =
1987 (gl_texture_index)inst->TexSrcTarget;
1988 prog->SamplersUsed |= 1 << inst->TexSrcUnit;
1989 if (inst->TexShadow) {
1990 prog->ShadowSamplers |= 1 << inst->TexSrcUnit;
1991 }
1992 }
1993 }
1994
1995 _mesa_update_shader_textures_used(prog);
1996 }
1997
1998 /* Each stage keeps its uniforms in its own Parameters list.  The
1999  * Uniforms list for the linked shader program records where each
2000  * uniform lives in each stage's Parameters list, so that the per-stage
2001  * values can be updated when the uniform is set.
2002  */
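/* For example, a uniform appearing in both the vertex and fragment stage's
 * Parameters lists should end up as a single entry in `uniforms`, with
 * _mesa_append_uniform() recording its parameter index for each target.
 */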
2003 static void
2004 link_uniforms_to_shared_uniform_list(struct gl_uniform_list *uniforms,
2005 struct gl_program *prog)
2006 {
2007 unsigned int i;
2008
2009 for (i = 0; i < prog->Parameters->NumParameters; i++) {
2010 const struct gl_program_parameter *p = prog->Parameters->Parameters + i;
2011
2012 if (p->Type == PROGRAM_UNIFORM || p->Type == PROGRAM_SAMPLER) {
2013 struct gl_uniform *uniform =
2014 _mesa_append_uniform(uniforms, p->Name, prog->Target, i);
2015 if (uniform)
2016 uniform->Initialized = p->Initialized;
2017 }
2018 }
2019 }
2020
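/**
 * Translates a compiled gl_shader's GLSL IR into a Mesa gl_program.
 *
 * Runs the ir_to_mesa_visitor over main() and over any functions it calls,
 * flattens the emitted instruction list into prog->Instructions, resolves
 * branch targets, and finally runs the Mesa program optimizer unless it is
 * disabled.
 */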
2021 struct gl_program *
2022 get_mesa_program(GLcontext *ctx, struct gl_shader_program *shader_program,
2023 struct gl_shader *shader)
2024 {
2025 void *mem_ctx = shader_program;
2026 ir_to_mesa_visitor v;
2027 struct prog_instruction *mesa_instructions, *mesa_inst;
2028 ir_instruction **mesa_instruction_annotation;
2029 int i;
2030 struct gl_program *prog;
2031 GLenum target;
2032 GLboolean progress;
2033
2034 switch (shader->Type) {
2035 case GL_VERTEX_SHADER: target = GL_VERTEX_PROGRAM_ARB; break;
2036 case GL_FRAGMENT_SHADER: target = GL_FRAGMENT_PROGRAM_ARB; break;
2037    default: assert(!"should not be reached"); return NULL;
2038 }
2039
2040 validate_ir_tree(shader->ir);
2041
2042 prog = ctx->Driver.NewProgram(ctx, target, 1);
2043 if (!prog)
2044 return NULL;
2045 prog->Parameters = _mesa_new_parameter_list();
2046 prog->Varying = _mesa_new_parameter_list();
2047 prog->Attributes = _mesa_new_parameter_list();
2048 v.ctx = ctx;
2049 v.prog = prog;
2050
2051 v.mem_ctx = talloc_new(NULL);
2052
2053 /* Emit Mesa IR for main(). */
2054 visit_exec_list(shader->ir, &v);
2055 v.ir_to_mesa_emit_op0(NULL, OPCODE_END);
2056
2057 /* Now emit bodies for any functions that were used. */
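   /* Emitting a body may add entries to function_signatures for the
    * functions it calls, so iterate until a pass emits nothing new.
    */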
2058 do {
2059 progress = GL_FALSE;
2060
2061 foreach_iter(exec_list_iterator, iter, v.function_signatures) {
2062 function_entry *entry = (function_entry *)iter.get();
2063
2064 if (!entry->bgn_inst) {
2065 v.current_function = entry;
2066
2067 entry->bgn_inst = v.ir_to_mesa_emit_op0(NULL, OPCODE_BGNSUB);
2068 entry->bgn_inst->function = entry;
2069
2070 visit_exec_list(&entry->sig->body, &v);
2071
2072          v.ir_to_mesa_emit_op0(NULL, OPCODE_RET);
2073          v.ir_to_mesa_emit_op0(NULL, OPCODE_ENDSUB);
2074 progress = GL_TRUE;
2075 }
2076 }
2077 } while (progress);
2078
2079 prog->NumTemporaries = v.next_temp;
2080
2081 int num_instructions = 0;
2082 foreach_iter(exec_list_iterator, iter, v.instructions) {
2083 num_instructions++;
2084 }
2085
2086 mesa_instructions =
2087 (struct prog_instruction *)calloc(num_instructions,
2088 sizeof(*mesa_instructions));
2089 mesa_instruction_annotation = talloc_array(mem_ctx, ir_instruction *,
2090 num_instructions);
2091
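   /* Convert the ir_to_mesa_instruction list into the prog_instruction
    * array, keeping track of the originating GLSL IR instruction for each
    * one so print_program() can annotate the output.
    */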
2092 mesa_inst = mesa_instructions;
2093 i = 0;
2094 foreach_iter(exec_list_iterator, iter, v.instructions) {
2095 ir_to_mesa_instruction *inst = (ir_to_mesa_instruction *)iter.get();
2096
2097 mesa_inst->Opcode = inst->op;
2098 mesa_inst->CondUpdate = inst->cond_update;
2099 mesa_inst->DstReg.File = inst->dst_reg.file;
2100 mesa_inst->DstReg.Index = inst->dst_reg.index;
2101 mesa_inst->DstReg.CondMask = inst->dst_reg.cond_mask;
2102 mesa_inst->DstReg.WriteMask = inst->dst_reg.writemask;
2103 mesa_inst->DstReg.RelAddr = inst->dst_reg.reladdr != NULL;
2104 mesa_inst->SrcReg[0] = mesa_src_reg_from_ir_src_reg(inst->src_reg[0]);
2105 mesa_inst->SrcReg[1] = mesa_src_reg_from_ir_src_reg(inst->src_reg[1]);
2106 mesa_inst->SrcReg[2] = mesa_src_reg_from_ir_src_reg(inst->src_reg[2]);
2107 mesa_inst->TexSrcUnit = inst->sampler;
2108 mesa_inst->TexSrcTarget = inst->tex_target;
2109 mesa_inst->TexShadow = inst->tex_shadow;
2110 mesa_instruction_annotation[i] = inst->ir;
2111
2112 if (ctx->Shader.EmitNoIfs && mesa_inst->Opcode == OPCODE_IF) {
2113 shader_program->InfoLog =
2114 talloc_asprintf_append(shader_program->InfoLog,
2115 "Couldn't flatten if statement\n");
2116 shader_program->LinkStatus = false;
2117 }
2118
2119 if (mesa_inst->Opcode == OPCODE_BGNSUB)
2120 inst->function->inst = i;
2121 else if (mesa_inst->Opcode == OPCODE_CAL)
2122 mesa_inst->BranchTarget = inst->function->sig_id; /* rewritten later */
2123
2124 mesa_inst++;
2125 i++;
2126 }
2127
2128 set_branchtargets(&v, mesa_instructions, num_instructions);
2129 if (0) {
2130 print_program(mesa_instructions, mesa_instruction_annotation,
2131 num_instructions);
2132 }
2133
2134 prog->Instructions = mesa_instructions;
2135 prog->NumInstructions = num_instructions;
2136
2137 _mesa_reference_program(ctx, &shader->Program, prog);
2138
2139 if ((ctx->Shader.Flags & GLSL_NO_OPT) == 0) {
2140 _mesa_optimize_program(ctx, prog);
2141 }
2142
2143 return prog;
2144 }
2145
2146 extern "C" {
2147
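/**
 * visit_tree() callback that reparents an IR node to the shader, so that
 * the live IR survives the talloc_free(state) at the end of compilation.
 */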
2148 static void
2149 steal_memory(ir_instruction *ir, void *new_ctx)
2150 {
2151 talloc_steal(new_ctx, ir);
2152 }
2153
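/**
 * Compiles a shader's GLSL source into GLSL IR.
 *
 * Runs the preprocessor, the lexer/parser, and the AST-to-HIR conversion,
 * followed by a set of lowering and optimization passes.  The result is
 * left in shader->ir along with the compile status and info log.
 */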
2154 void
2155 _mesa_glsl_compile_shader(GLcontext *ctx, struct gl_shader *shader)
2156 {
2157 struct _mesa_glsl_parse_state *state;
2158
2159 state = talloc_zero(shader, struct _mesa_glsl_parse_state);
2160 switch (shader->Type) {
2161 case GL_VERTEX_SHADER: state->target = vertex_shader; break;
2162 case GL_FRAGMENT_SHADER: state->target = fragment_shader; break;
2163 case GL_GEOMETRY_SHADER: state->target = geometry_shader; break;
2164 }
2165
2166 state->scanner = NULL;
2167 state->translation_unit.make_empty();
2168 state->symbols = new(shader) glsl_symbol_table;
2169 state->info_log = talloc_strdup(shader, "");
2170 state->error = false;
2171 state->loop_or_switch_nesting = NULL;
2172 state->ARB_texture_rectangle_enable = true;
2173
2174 state->extensions = &ctx->Extensions;
2175
2176 state->Const.MaxLights = ctx->Const.MaxLights;
2177 state->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
2178 state->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
2179 state->Const.MaxTextureCoords = ctx->Const.MaxTextureCoordUnits;
2180 state->Const.MaxVertexAttribs = ctx->Const.VertexProgram.MaxAttribs;
2181 state->Const.MaxVertexUniformComponents = ctx->Const.VertexProgram.MaxUniformComponents;
2182 state->Const.MaxVaryingFloats = ctx->Const.MaxVarying * 4;
2183 state->Const.MaxVertexTextureImageUnits = ctx->Const.MaxVertexTextureImageUnits;
2184 state->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxCombinedTextureImageUnits;
2185 state->Const.MaxTextureImageUnits = ctx->Const.MaxTextureImageUnits;
2186 state->Const.MaxFragmentUniformComponents = ctx->Const.FragmentProgram.MaxUniformComponents;
2187
2188 state->Const.MaxDrawBuffers = ctx->Const.MaxDrawBuffers;
2189
2190 const char *source = shader->Source;
2191 state->error = preprocess(state, &source, &state->info_log,
2192 &ctx->Extensions);
2193
2194 if (!state->error) {
2195 _mesa_glsl_lexer_ctor(state, source);
2196 _mesa_glsl_parse(state);
2197 _mesa_glsl_lexer_dtor(state);
2198 }
2199
2200 shader->ir = new(shader) exec_list;
2201 if (!state->error && !state->translation_unit.is_empty())
2202 _mesa_ast_to_hir(shader->ir, state);
2203
2204 if (!state->error && !shader->ir->is_empty()) {
2205 validate_ir_tree(shader->ir);
2206
2207 /* Lowering */
2208 do_mat_op_to_vec(shader->ir);
2209 do_mod_to_fract(shader->ir);
2210 do_div_to_mul_rcp(shader->ir);
2211
2212 /* Optimization passes */
2213 bool progress;
2214 do {
2215 progress = false;
2216
2217 progress = do_function_inlining(shader->ir) || progress;
2218 progress = do_if_simplification(shader->ir) || progress;
2219 progress = do_copy_propagation(shader->ir) || progress;
2220 progress = do_dead_code_local(shader->ir) || progress;
2221 progress = do_dead_code_unlinked(state, shader->ir) || progress;
2222 progress = do_constant_variable_unlinked(shader->ir) || progress;
2223 progress = do_constant_folding(shader->ir) || progress;
2224 progress = do_if_return(shader->ir) || progress;
2225 if (ctx->Shader.EmitNoIfs)
2226 progress = do_if_to_cond_assign(shader->ir) || progress;
2227
2228 progress = do_vec_index_to_swizzle(shader->ir) || progress;
2229 	 /* Do this one after the previous pass so the easier pass handles
2230 	  * constant vector indexing; only variable indices then get cond_assigns.
2231 	  */
2232 progress = do_vec_index_to_cond_assign(shader->ir) || progress;
2233
2234 progress = do_swizzle_swizzle(shader->ir) || progress;
2235 } while (progress);
2236
2237 validate_ir_tree(shader->ir);
2238 }
2239
2240 shader->symbols = state->symbols;
2241
2242 shader->CompileStatus = !state->error;
2243 shader->InfoLog = state->info_log;
2244 shader->Version = state->language_version;
2245
2246 /* Retain any live IR, but trash the rest. */
2247 foreach_list(node, shader->ir) {
2248 visit_tree((ir_instruction *) node, steal_memory, shader);
2249 }
2250
2251 talloc_free(state);
2252 }
2253
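/**
 * Links the compiled shaders of a shader program and generates Mesa IR for
 * each linked stage.
 *
 * On success, each stage's gl_program is generated with get_mesa_program(),
 * its resource usage is counted, its uniforms are gathered into
 * prog->Uniforms, and the driver is notified via ProgramStringNotify().
 */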
2254 void
2255 _mesa_glsl_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
2256 {
2257 unsigned int i;
2258
2259 _mesa_clear_shader_program_data(ctx, prog);
2260
2261 prog->LinkStatus = GL_TRUE;
2262
2263 for (i = 0; i < prog->NumShaders; i++) {
2264 if (!prog->Shaders[i]->CompileStatus) {
2265 prog->InfoLog =
2266 talloc_asprintf_append(prog->InfoLog,
2267 "linking with uncompiled shader");
2268 prog->LinkStatus = GL_FALSE;
2269 }
2270 }
2271
2272 prog->Varying = _mesa_new_parameter_list();
2273 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
2274 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
2275
2276 if (prog->LinkStatus) {
2277 link_shaders(prog);
2278
2279       /* We don't use the linker's uniforms list; we cook up our own at
2280        * program generation time instead.
2281        */
2282 free(prog->Uniforms);
2283 prog->Uniforms = _mesa_new_uniform_list();
2284 }
2285
2286 if (prog->LinkStatus) {
2287 for (i = 0; i < prog->_NumLinkedShaders; i++) {
2288 struct gl_program *linked_prog;
2289
2290 linked_prog = get_mesa_program(ctx, prog,
2291 prog->_LinkedShaders[i]);
2292 count_resources(linked_prog);
2293
2294 link_uniforms_to_shared_uniform_list(prog->Uniforms, linked_prog);
2295
2296 switch (prog->_LinkedShaders[i]->Type) {
2297 case GL_VERTEX_SHADER:
2298 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
2299 (struct gl_vertex_program *)linked_prog);
2300 ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
2301 linked_prog);
2302 break;
2303 case GL_FRAGMENT_SHADER:
2304 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
2305 (struct gl_fragment_program *)linked_prog);
2306 ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
2307 linked_prog);
2308 break;
2309 }
2310 }
2311 }
2312 }
2313
2314 } /* extern "C" */