ir_to_mesa: Rename ir_to_mesa_emit_*_opX methods to emit_*.
[mesa.git] / src / mesa / program / ir_to_mesa.cpp
1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 /**
27 * \file ir_to_mesa.cpp
28 *
29 * Translate GLSL IR to Mesa's gl_program representation.
30 */
31
32 #include <stdio.h>
33 #include "main/compiler.h"
34 #include "ir.h"
35 #include "ir_visitor.h"
36 #include "ir_print_visitor.h"
37 #include "ir_expression_flattening.h"
38 #include "glsl_types.h"
39 #include "glsl_parser_extras.h"
40 #include "../glsl/program.h"
41 #include "ir_optimization.h"
42 #include "ast.h"
43
44 extern "C" {
45 #include "main/mtypes.h"
46 #include "main/shaderapi.h"
47 #include "main/shaderobj.h"
48 #include "main/uniforms.h"
49 #include "program/hash_table.h"
50 #include "program/prog_instruction.h"
51 #include "program/prog_optimize.h"
52 #include "program/prog_print.h"
53 #include "program/program.h"
54 #include "program/prog_uniform.h"
55 #include "program/prog_parameter.h"
56 #include "program/sampler.h"
57 }
58
59 class src_reg;
60 class dst_reg;
61
62 static int swizzle_for_size(int size);
63
64 /**
65 * This struct is a corresponding struct to Mesa prog_src_register, with
66 * wider fields.
67 */
68 class src_reg {
69 public:
70 src_reg(int file, int index, const glsl_type *type)
71 {
72 this->file = (gl_register_file) file;
73 this->index = index;
74 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
75 this->swizzle = swizzle_for_size(type->vector_elements);
76 else
77 this->swizzle = SWIZZLE_XYZW;
78 this->negate = 0;
79 this->reladdr = NULL;
80 }
81
82 src_reg()
83 {
84 this->file = PROGRAM_UNDEFINED;
85 this->index = 0;
86 this->swizzle = 0;
87 this->negate = 0;
88 this->reladdr = NULL;
89 }
90
91 explicit src_reg(dst_reg reg);
92
93 gl_register_file file; /**< PROGRAM_* from Mesa */
94 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
95 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */
96 int negate; /**< NEGATE_XYZW mask from mesa */
97 /** Register index should be offset by the integer in this reg. */
98 src_reg *reladdr;
99 };
100
101 class dst_reg {
102 public:
103 dst_reg(int file, int writemask)
104 {
105 this->file = file;
106 this->index = 0;
107 this->writemask = writemask;
108 this->cond_mask = COND_TR;
109 this->reladdr = NULL;
110 }
111
112 dst_reg()
113 {
114 this->file = PROGRAM_UNDEFINED;
115 this->index = 0;
116 this->writemask = 0;
117 this->cond_mask = COND_TR;
118 this->reladdr = NULL;
119 }
120
121 explicit dst_reg(src_reg reg);
122
123 int file; /**< PROGRAM_* from Mesa */
124 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */
125 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
126 GLuint cond_mask:4;
127 /** Register index should be offset by the integer in this reg. */
128 src_reg *reladdr;
129 };
130
131 src_reg::src_reg(dst_reg reg)
132 {
133 this->file = (gl_register_file) reg.file;
134 this->index = reg.index;
135 this->swizzle = SWIZZLE_XYZW;
136 this->negate = 0;
137 this->reladdr = reg.reladdr;
138 }
139
140 dst_reg::dst_reg(src_reg reg)
141 {
142 this->file = reg.file;
143 this->index = reg.index;
144 this->writemask = WRITEMASK_XYZW;
145 this->cond_mask = COND_TR;
146 this->reladdr = reg.reladdr;
147 }
148
149 extern src_reg ir_to_mesa_undef;
150
151 class ir_to_mesa_instruction : public exec_node {
152 public:
153 /* Callers of this ralloc-based new need not call delete. It's
154 * easier to just ralloc_free 'ctx' (or any of its ancestors). */
155 static void* operator new(size_t size, void *ctx)
156 {
157 void *node;
158
159 node = rzalloc_size(ctx, size);
160 assert(node != NULL);
161
162 return node;
163 }
164
165 enum prog_opcode op;
166 dst_reg dst;
167 src_reg src[3];
168 /** Pointer to the ir source this tree came from for debugging */
169 ir_instruction *ir;
170 GLboolean cond_update;
171 bool saturate;
172 int sampler; /**< sampler index */
173 int tex_target; /**< One of TEXTURE_*_INDEX */
174 GLboolean tex_shadow;
175
176 class function_entry *function; /* Set on OPCODE_CAL or OPCODE_BGNSUB */
177 };
178
179 class variable_storage : public exec_node {
180 public:
181 variable_storage(ir_variable *var, gl_register_file file, int index)
182 : file(file), index(index), var(var)
183 {
184 /* empty */
185 }
186
187 gl_register_file file;
188 int index;
189 ir_variable *var; /* variable that maps to this, if any */
190 };
191
192 class function_entry : public exec_node {
193 public:
194 ir_function_signature *sig;
195
196 /**
197 * identifier of this function signature used by the program.
198 *
199 * At the point that Mesa instructions for function calls are
200 * generated, we don't know the address of the first instruction of
201 * the function body. So we make the BranchTarget that is called a
202 * small integer and rewrite them during set_branchtargets().
203 */
204 int sig_id;
205
206 /**
207 * Pointer to first instruction of the function body.
208 *
209 * Set during function body emits after main() is processed.
210 */
211 ir_to_mesa_instruction *bgn_inst;
212
213 /**
214 * Index of the first instruction of the function body in actual
215 * Mesa IR.
216 *
217 * Set after conversion from ir_to_mesa_instruction to prog_instruction.
218 */
219 int inst;
220
221 /** Storage for the return value. */
222 src_reg return_reg;
223 };
224
225 class ir_to_mesa_visitor : public ir_visitor {
226 public:
227 ir_to_mesa_visitor();
228 ~ir_to_mesa_visitor();
229
230 function_entry *current_function;
231
232 struct gl_context *ctx;
233 struct gl_program *prog;
234 struct gl_shader_program *shader_program;
235 struct gl_shader_compiler_options *options;
236
237 int next_temp;
238
239 variable_storage *find_variable_storage(ir_variable *var);
240
241 function_entry *get_function_signature(ir_function_signature *sig);
242
243 src_reg get_temp(const glsl_type *type);
244 void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);
245
246 src_reg src_reg_for_float(float val);
247
248 /**
249 * \name Visit methods
250 *
251 * As typical for the visitor pattern, there must be one \c visit method for
252 * each concrete subclass of \c ir_instruction. Virtual base classes within
253 * the hierarchy should not have \c visit methods.
254 */
255 /*@{*/
256 virtual void visit(ir_variable *);
257 virtual void visit(ir_loop *);
258 virtual void visit(ir_loop_jump *);
259 virtual void visit(ir_function_signature *);
260 virtual void visit(ir_function *);
261 virtual void visit(ir_expression *);
262 virtual void visit(ir_swizzle *);
263 virtual void visit(ir_dereference_variable *);
264 virtual void visit(ir_dereference_array *);
265 virtual void visit(ir_dereference_record *);
266 virtual void visit(ir_assignment *);
267 virtual void visit(ir_constant *);
268 virtual void visit(ir_call *);
269 virtual void visit(ir_return *);
270 virtual void visit(ir_discard *);
271 virtual void visit(ir_texture *);
272 virtual void visit(ir_if *);
273 /*@}*/
274
275 src_reg result;
276
277 /** List of variable_storage */
278 exec_list variables;
279
280 /** List of function_entry */
281 exec_list function_signatures;
282 int next_signature_id;
283
284 /** List of ir_to_mesa_instruction */
285 exec_list instructions;
286
287 ir_to_mesa_instruction *emit(ir_instruction *ir, enum prog_opcode op);
288
289 ir_to_mesa_instruction *emit(ir_instruction *ir, enum prog_opcode op,
290 dst_reg dst, src_reg src0);
291
292 ir_to_mesa_instruction *emit(ir_instruction *ir, enum prog_opcode op,
293 dst_reg dst, src_reg src0, src_reg src1);
294
295 ir_to_mesa_instruction *emit(ir_instruction *ir, enum prog_opcode op,
296 dst_reg dst,
297 src_reg src0, src_reg src1, src_reg src2);
298
299 /**
300 * Emit the correct dot-product instruction for the type of arguments
301 */
302 void emit_dp(ir_instruction *ir,
303 dst_reg dst,
304 src_reg src0,
305 src_reg src1,
306 unsigned elements);
307
308 void emit_scalar(ir_instruction *ir, enum prog_opcode op,
309 dst_reg dst, src_reg src0);
310
311 void emit_scalar(ir_instruction *ir, enum prog_opcode op,
312 dst_reg dst, src_reg src0, src_reg src1);
313
314 void emit_scs(ir_instruction *ir, enum prog_opcode op,
315 dst_reg dst, const src_reg &src);
316
317 GLboolean try_emit_mad(ir_expression *ir,
318 int mul_operand);
319 GLboolean try_emit_sat(ir_expression *ir);
320
321 void emit_swz(ir_expression *ir);
322
323 bool process_move_condition(ir_rvalue *ir);
324
325 void copy_propagate(void);
326
327 void *mem_ctx;
328 };
329
330 src_reg ir_to_mesa_undef = src_reg(PROGRAM_UNDEFINED, 0, NULL);
331
332 dst_reg ir_to_mesa_undef_dst = dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP);
333
334 dst_reg ir_to_mesa_address_reg = dst_reg(PROGRAM_ADDRESS, WRITEMASK_X);
335
336 static void
337 fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3);
338
339 static void
340 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
341 {
342 va_list args;
343 va_start(args, fmt);
344 ralloc_vasprintf_append(&prog->InfoLog, fmt, args);
345 va_end(args);
346
347 prog->LinkStatus = GL_FALSE;
348 }
349
350 static int
351 swizzle_for_size(int size)
352 {
353 int size_swizzles[4] = {
354 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
355 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
356 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
357 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
358 };
359
360 assert((size >= 1) && (size <= 4));
361 return size_swizzles[size - 1];
362 }
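/* For illustration, the mapping this produces:
 *
 *    swizzle_for_size(1) -> .xxxx
 *    swizzle_for_size(2) -> .xyyy
 *    swizzle_for_size(3) -> .xyzz
 *    swizzle_for_size(4) -> .xyzw
 *
 * The trailing channels repeat the last real channel, so reading a scalar,
 * vec2 or vec3 value through a vec4 register never picks up undefined
 * components.
 */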
363
364 ir_to_mesa_instruction *
365 ir_to_mesa_visitor::emit(ir_instruction *ir, enum prog_opcode op,
366 dst_reg dst,
367 src_reg src0, src_reg src1, src_reg src2)
368 {
369 ir_to_mesa_instruction *inst = new(mem_ctx) ir_to_mesa_instruction();
370 int num_reladdr = 0;
371
372 /* If we have to do relative addressing, we want to load the ARL
373 * reg directly for one of the regs, and preload the other reladdr
374 * sources into temps.
375 */
376 num_reladdr += dst.reladdr != NULL;
377 num_reladdr += src0.reladdr != NULL;
378 num_reladdr += src1.reladdr != NULL;
379 num_reladdr += src2.reladdr != NULL;
380
381 reladdr_to_temp(ir, &src2, &num_reladdr);
382 reladdr_to_temp(ir, &src1, &num_reladdr);
383 reladdr_to_temp(ir, &src0, &num_reladdr);
384
385 if (dst.reladdr) {
386 emit(ir, OPCODE_ARL, ir_to_mesa_address_reg, *dst.reladdr);
387 num_reladdr--;
388 }
389 assert(num_reladdr == 0);
390
391 inst->op = op;
392 inst->dst = dst;
393 inst->src[0] = src0;
394 inst->src[1] = src1;
395 inst->src[2] = src2;
396 inst->ir = ir;
397
398 inst->function = NULL;
399
400 this->instructions.push_tail(inst);
401
402 return inst;
403 }
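/* Sketch of the relative-addressing handling above: each extra reladdr
 * source is flushed early -- an ARL loads its index and a MOV pulls the
 * addressed value into a plain temporary -- so that by the time the
 * instruction is recorded, at most one operand (the destination's reladdr
 * if present, otherwise the last source processed) still reads through the
 * address register, loaded by the final ARL.
 */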
404
405
406 ir_to_mesa_instruction *
407 ir_to_mesa_visitor::emit(ir_instruction *ir, enum prog_opcode op,
408 dst_reg dst, src_reg src0, src_reg src1)
409 {
410 return emit(ir, op, dst, src0, src1, ir_to_mesa_undef);
411 }
412
413 ir_to_mesa_instruction *
414 ir_to_mesa_visitor::emit(ir_instruction *ir, enum prog_opcode op,
415 dst_reg dst, src_reg src0)
416 {
417 assert(dst.writemask != 0);
418 return emit(ir, op, dst, src0, ir_to_mesa_undef, ir_to_mesa_undef);
419 }
420
421 ir_to_mesa_instruction *
422 ir_to_mesa_visitor::emit(ir_instruction *ir, enum prog_opcode op)
423 {
424 return emit(ir, op, ir_to_mesa_undef_dst,
425 ir_to_mesa_undef, ir_to_mesa_undef, ir_to_mesa_undef);
426 }
427
428 void
429 ir_to_mesa_visitor::emit_dp(ir_instruction *ir,
430 dst_reg dst, src_reg src0, src_reg src1,
431 unsigned elements)
432 {
433 static const gl_inst_opcode dot_opcodes[] = {
434 OPCODE_DP2, OPCODE_DP3, OPCODE_DP4
435 };
436
437 emit(ir, dot_opcodes[elements - 2], dst, src0, src1, ir_to_mesa_undef);
438 }
439
440 /**
441 * Emits Mesa scalar opcodes to produce unique answers across channels.
442 *
443 * Some Mesa opcodes are scalar-only, as in ARB_fp/vp: the source's X
444 * channel alone determines the result for every destination channel.
445 * So to compute a vec4 of such an operation, we emit one scalar
446 * instruction per distinct source channel feeding the destination.
447 */
448 void
449 ir_to_mesa_visitor::emit_scalar(ir_instruction *ir, enum prog_opcode op,
450 dst_reg dst,
451 src_reg orig_src0, src_reg orig_src1)
452 {
453 int i, j;
454 int done_mask = ~dst.writemask;
455
456 /* Mesa RCP is a scalar operation splatting results to all channels,
457 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
458 * dst channels.
459 */
460 for (i = 0; i < 4; i++) {
461 GLuint this_mask = (1 << i);
462 ir_to_mesa_instruction *inst;
463 src_reg src0 = orig_src0;
464 src_reg src1 = orig_src1;
465
466 if (done_mask & this_mask)
467 continue;
468
469 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
470 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
471 for (j = i + 1; j < 4; j++) {
472 /* If there is another enabled component in the destination that is
473 * derived from the same inputs, generate its value on this pass as
474 * well.
475 */
476 if (!(done_mask & (1 << j)) &&
477 GET_SWZ(src0.swizzle, j) == src0_swiz &&
478 GET_SWZ(src1.swizzle, j) == src1_swiz) {
479 this_mask |= (1 << j);
480 }
481 }
482 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
483 src0_swiz, src0_swiz);
484 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
485 src1_swiz, src1_swiz);
486
487 inst = emit(ir, op, dst, src0, src1);
488 inst->dst.writemask = this_mask;
489 done_mask |= this_mask;
490 }
491 }
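/* A rough sketch of the expansion, assuming a full .xyzw writemask and a
 * source swizzled .xxyy:
 *
 *    RCP dst.xy, src.xxxx;
 *    RCP dst.zw, src.yyyy;
 *
 * Destination channels that read the same source component are folded into
 * one instruction by the inner loop above.
 */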
492
493 void
494 ir_to_mesa_visitor::emit_scalar(ir_instruction *ir, enum prog_opcode op,
495 dst_reg dst, src_reg src0)
496 {
497 src_reg undef = ir_to_mesa_undef;
498
499 undef.swizzle = SWIZZLE_XXXX;
500
501 emit_scalar(ir, op, dst, src0, undef);
502 }
503
504 /**
505 * Emit an OPCODE_SCS instruction
506 *
507 * The \c SCS opcode functions a bit differently than the other Mesa (or
508 * ARB_fragment_program) opcodes. Instead of splatting its result across all
509 * four components of the destination, it writes one value to the \c x
510 * component and another value to the \c y component.
511 *
512 * \param ir IR instruction being processed
513 * \param op Either \c OPCODE_SIN or \c OPCODE_COS depending on which
514 * value is desired.
515 * \param dst Destination register
516 * \param src Source register
517 */
518 void
519 ir_to_mesa_visitor::emit_scs(ir_instruction *ir, enum prog_opcode op,
520 dst_reg dst,
521 const src_reg &src)
522 {
523 /* Vertex programs cannot use the SCS opcode.
524 */
525 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) {
526 emit_scalar(ir, op, dst, src);
527 return;
528 }
529
530 const unsigned component = (op == OPCODE_SIN) ? 1 : 0;
531 const unsigned scs_mask = (1U << component);
532 int done_mask = ~dst.writemask;
533 src_reg tmp;
534
535 assert(op == OPCODE_SIN || op == OPCODE_COS);
536
537 /* If there are components in the destination that differ from the component
538 * that will be written by the SCS instruction, we'll need a temporary.
539 */
540 if (scs_mask != unsigned(dst.writemask)) {
541 tmp = get_temp(glsl_type::vec4_type);
542 }
543
544 for (unsigned i = 0; i < 4; i++) {
545 unsigned this_mask = (1U << i);
546 src_reg src0 = src;
547
548 if ((done_mask & this_mask) != 0)
549 continue;
550
551 /* The source swizzle specifies which component of the source generates
552 * sine / cosine for the current component in the destination. The SCS
553 * instruction requires that this value be swizzled into the X component.
554 * Replace the current swizzle with a swizzle that puts the source in
555 * the X component.
556 */
557 unsigned src0_swiz = GET_SWZ(src.swizzle, i);
558
559 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
560 src0_swiz, src0_swiz);
561 for (unsigned j = i + 1; j < 4; j++) {
562 /* If there is another enabled component in the destination that is
563 * derived from the same inputs, generate its value on this pass as
564 * well.
565 */
566 if (!(done_mask & (1 << j)) &&
567 GET_SWZ(src0.swizzle, j) == src0_swiz) {
568 this_mask |= (1 << j);
569 }
570 }
571
572 if (this_mask != scs_mask) {
573 ir_to_mesa_instruction *inst;
574 dst_reg tmp_dst = dst_reg(tmp);
575
576 /* Emit the SCS instruction.
577 */
578 inst = emit(ir, OPCODE_SCS, tmp_dst, src0);
579 inst->dst.writemask = scs_mask;
580
581 /* Move the result of the SCS instruction to the desired location in
582 * the destination.
583 */
584 tmp.swizzle = MAKE_SWIZZLE4(component, component,
585 component, component);
586 inst = emit(ir, OPCODE_MOV, dst, tmp);
587 inst->dst.writemask = this_mask;
588 } else {
589 /* Emit the SCS instruction to write directly to the destination.
590 */
591 ir_to_mesa_instruction *inst = emit(ir, OPCODE_SCS, dst, src0);
592 inst->dst.writemask = scs_mask;
593 }
594
595 done_mask |= this_mask;
596 }
597 }
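/* Two example expansions of the above, for fragment programs: if the
 * destination writemask is exactly the channel SCS produces for this op,
 * a single SCS is emitted with its writemask reduced to that channel;
 * otherwise the SCS result lands in a vec4 temporary and is then copied
 * (MOV) into each requested destination channel, one pass per distinct
 * source component.
 */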
598
599 struct src_reg
600 ir_to_mesa_visitor::src_reg_for_float(float val)
601 {
602 src_reg src(PROGRAM_CONSTANT, -1, NULL);
603
604 src.index = _mesa_add_unnamed_constant(this->prog->Parameters,
605 &val, 1, &src.swizzle);
606
607 return src;
608 }
609
610 static int
611 type_size(const struct glsl_type *type)
612 {
613 unsigned int i;
614 int size;
615
616 switch (type->base_type) {
617 case GLSL_TYPE_UINT:
618 case GLSL_TYPE_INT:
619 case GLSL_TYPE_FLOAT:
620 case GLSL_TYPE_BOOL:
621 if (type->is_matrix()) {
622 return type->matrix_columns;
623 } else {
624 /* Regardless of size of vector, it gets a vec4. This is bad
625 * packing for things like floats, but otherwise arrays become a
626 * mess. Hopefully a later pass over the code can pack scalars
627 * down if appropriate.
628 */
629 return 1;
630 }
631 case GLSL_TYPE_ARRAY:
632 assert(type->length > 0);
633 return type_size(type->fields.array) * type->length;
634 case GLSL_TYPE_STRUCT:
635 size = 0;
636 for (i = 0; i < type->length; i++) {
637 size += type_size(type->fields.structure[i].type);
638 }
639 return size;
640 case GLSL_TYPE_SAMPLER:
641 /* Samplers take up one slot in UNIFORMS[], but they're baked in
642 * at link time.
643 */
644 return 1;
645 default:
646 assert(0);
647 return 0;
648 }
649 }
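/* Example sizes, in vec4 slots, following the rules above: float, vec2 and
 * vec4 each take 1 slot; mat4 takes 4 (one per column); float[8] takes 8;
 * and struct { vec3 a; float b; } takes 2, since every scalar or vector
 * member is padded out to a full vec4.
 */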
650
651 /**
652 * In the initial pass of codegen, we assign temporary numbers to
653 * intermediate results. (not SSA -- variable assignments will reuse
654 * storage). Actual register allocation for the Mesa VM occurs in a
655 * pass over the Mesa IR later.
656 */
657 src_reg
658 ir_to_mesa_visitor::get_temp(const glsl_type *type)
659 {
660 src_reg src;
661 int swizzle[4];
662 int i;
663
664 src.file = PROGRAM_TEMPORARY;
665 src.index = next_temp;
666 src.reladdr = NULL;
667 next_temp += type_size(type);
668
669 if (type->is_array() || type->is_record()) {
670 src.swizzle = SWIZZLE_NOOP;
671 } else {
672 for (i = 0; i < type->vector_elements; i++)
673 swizzle[i] = i;
674 for (; i < 4; i++)
675 swizzle[i] = type->vector_elements - 1;
676 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1],
677 swizzle[2], swizzle[3]);
678 }
679 src.negate = 0;
680
681 return src;
682 }
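/* For example, get_temp(glsl_type::vec2_type) returns the next free
 * temporary with swizzle .xyyy (the last real channel replicated into the
 * unused ones), while a temp for an array or struct spans type_size()
 * consecutive registers and carries SWIZZLE_NOOP.
 */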
683
684 variable_storage *
685 ir_to_mesa_visitor::find_variable_storage(ir_variable *var)
686 {
687
688 variable_storage *entry;
689
690 foreach_iter(exec_list_iterator, iter, this->variables) {
691 entry = (variable_storage *)iter.get();
692
693 if (entry->var == var)
694 return entry;
695 }
696
697 return NULL;
698 }
699
700 void
701 ir_to_mesa_visitor::visit(ir_variable *ir)
702 {
703 if (strcmp(ir->name, "gl_FragCoord") == 0) {
704 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
705
706 fp->OriginUpperLeft = ir->origin_upper_left;
707 fp->PixelCenterInteger = ir->pixel_center_integer;
708
709 } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
710 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
711 switch (ir->depth_layout) {
712 case ir_depth_layout_none:
713 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
714 break;
715 case ir_depth_layout_any:
716 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
717 break;
718 case ir_depth_layout_greater:
719 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
720 break;
721 case ir_depth_layout_less:
722 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
723 break;
724 case ir_depth_layout_unchanged:
725 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
726 break;
727 default:
728 assert(0);
729 break;
730 }
731 }
732
733 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
734 unsigned int i;
735 const ir_state_slot *const slots = ir->state_slots;
736 assert(ir->state_slots != NULL);
737
738 /* Check if this statevar's setup in the STATE file exactly
739 * matches how we'll want to reference it as a
740 * struct/array/whatever. If not, then we need to move it into
741 * temporary storage and hope that it'll get copy-propagated
742 * out.
743 */
744 for (i = 0; i < ir->num_state_slots; i++) {
745 if (slots[i].swizzle != SWIZZLE_XYZW) {
746 break;
747 }
748 }
749
750 struct variable_storage *storage;
751 dst_reg dst;
752 if (i == ir->num_state_slots) {
753 /* We'll set the index later. */
754 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
755 this->variables.push_tail(storage);
756
757 dst = ir_to_mesa_undef_dst;
758 } else {
759 /* The variable_storage constructor allocates slots based on the size
760 * of the type. However, this had better match the number of state
761 * elements that we're going to copy into the new temporary.
762 */
763 assert(ir->num_state_slots == type_size(ir->type));
764
765 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY,
766 this->next_temp);
767 this->variables.push_tail(storage);
768 this->next_temp += type_size(ir->type);
769
770 dst = dst_reg(src_reg(PROGRAM_TEMPORARY, storage->index, NULL));
771 }
772
773
774 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
775 int index = _mesa_add_state_reference(this->prog->Parameters,
776 (gl_state_index *)slots[i].tokens);
777
778 if (storage->file == PROGRAM_STATE_VAR) {
779 if (storage->index == -1) {
780 storage->index = index;
781 } else {
782 assert(index == storage->index + (int)i);
783 }
784 } else {
785 src_reg src(PROGRAM_STATE_VAR, index, NULL);
786 src.swizzle = slots[i].swizzle;
787 emit(ir, OPCODE_MOV, dst, src);
788 /* even a float takes up a whole vec4 reg in a struct/array. */
789 dst.index++;
790 }
791 }
792
793 if (storage->file == PROGRAM_TEMPORARY &&
794 dst.index != storage->index + ir->num_state_slots) {
795 fail_link(this->shader_program,
796 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
797 ir->name, dst.index - storage->index,
798 type_size(ir->type));
799 }
800 }
801 }
802
803 void
804 ir_to_mesa_visitor::visit(ir_loop *ir)
805 {
806 ir_dereference_variable *counter = NULL;
807
808 if (ir->counter != NULL)
809 counter = new(ir) ir_dereference_variable(ir->counter);
810
811 if (ir->from != NULL) {
812 assert(ir->counter != NULL);
813
814 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL);
815
816 a->accept(this);
817 delete a;
818 }
819
820 emit(NULL, OPCODE_BGNLOOP);
821
822 if (ir->to) {
823 ir_expression *e =
824 new(ir) ir_expression(ir->cmp, glsl_type::bool_type,
825 counter, ir->to);
826 ir_if *if_stmt = new(ir) ir_if(e);
827
828 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
829
830 if_stmt->then_instructions.push_tail(brk);
831
832 if_stmt->accept(this);
833
834 delete if_stmt;
835 delete e;
836 delete brk;
837 }
838
839 visit_exec_list(&ir->body_instructions, this);
840
841 if (ir->increment) {
842 ir_expression *e =
843 new(ir) ir_expression(ir_binop_add, counter->type,
844 counter, ir->increment);
845
846 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL);
847
848 a->accept(this);
849 delete a;
850 delete e;
851 }
852
853 emit(NULL, OPCODE_ENDLOOP);
854 }
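/* For a bounded loop this emits, roughly:
 *
 *    MOV counter, from;                       (only if "from" is present)
 *    BGNLOOP;
 *       <compare counter against "to" and BRK, via the synthesized ir_if>;
 *       <body instructions>;
 *       ADD counter, counter, increment;      (only if an increment exists)
 *    ENDLOOP;
 */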
855
856 void
857 ir_to_mesa_visitor::visit(ir_loop_jump *ir)
858 {
859 switch (ir->mode) {
860 case ir_loop_jump::jump_break:
861 emit(NULL, OPCODE_BRK);
862 break;
863 case ir_loop_jump::jump_continue:
864 emit(NULL, OPCODE_CONT);
865 break;
866 }
867 }
868
869
870 void
871 ir_to_mesa_visitor::visit(ir_function_signature *ir)
872 {
873 assert(0);
874 (void)ir;
875 }
876
877 void
878 ir_to_mesa_visitor::visit(ir_function *ir)
879 {
880 /* Ignore function bodies other than main() -- we shouldn't see calls to
881 * them since they should all be inlined before we get to ir_to_mesa.
882 */
883 if (strcmp(ir->name, "main") == 0) {
884 const ir_function_signature *sig;
885 exec_list empty;
886
887 sig = ir->matching_signature(&empty);
888
889 assert(sig);
890
891 foreach_iter(exec_list_iterator, iter, sig->body) {
892 ir_instruction *ir = (ir_instruction *)iter.get();
893
894 ir->accept(this);
895 }
896 }
897 }
898
899 GLboolean
900 ir_to_mesa_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
901 {
902 int nonmul_operand = 1 - mul_operand;
903 src_reg a, b, c;
904
905 ir_expression *expr = ir->operands[mul_operand]->as_expression();
906 if (!expr || expr->operation != ir_binop_mul)
907 return false;
908
909 expr->operands[0]->accept(this);
910 a = this->result;
911 expr->operands[1]->accept(this);
912 b = this->result;
913 ir->operands[nonmul_operand]->accept(this);
914 c = this->result;
915
916 this->result = get_temp(ir->type);
917 emit(ir, OPCODE_MAD, dst_reg(this->result), a, b, c);
918
919 return true;
920 }
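/* For example, the GLSL expression a * b + c arrives here as
 * ir_binop_add(ir_binop_mul(a, b), c) and collapses into a single
 *
 *    MAD result, a, b, c;
 *
 * rather than a MUL followed by an ADD.
 */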
921
922 GLboolean
923 ir_to_mesa_visitor::try_emit_sat(ir_expression *ir)
924 {
925 /* Saturates were only introduced to vertex programs in
926 * NV_vertex_program3, so don't give them to drivers in the VP.
927 */
928 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
929 return false;
930
931 ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
932 if (!sat_src)
933 return false;
934
935 sat_src->accept(this);
936 src_reg src = this->result;
937
938 this->result = get_temp(ir->type);
939 ir_to_mesa_instruction *inst;
940 inst = emit(ir, OPCODE_MOV, dst_reg(this->result), src);
941 inst->saturate = true;
942
943 return true;
944 }
945
946 void
947 ir_to_mesa_visitor::reladdr_to_temp(ir_instruction *ir,
948 src_reg *reg, int *num_reladdr)
949 {
950 if (!reg->reladdr)
951 return;
952
953 emit(ir, OPCODE_ARL, ir_to_mesa_address_reg, *reg->reladdr);
954
955 if (*num_reladdr != 1) {
956 src_reg temp = get_temp(glsl_type::vec4_type);
957
958 emit(ir, OPCODE_MOV, dst_reg(temp), *reg);
959 *reg = temp;
960 }
961
962 (*num_reladdr)--;
963 }
964
965 void
966 ir_to_mesa_visitor::emit_swz(ir_expression *ir)
967 {
968 /* Assume that the vector operator is in a form compatible with OPCODE_SWZ.
969 * This means that each of the operands is either an immediate value of -1,
970 * 0, or 1, or is a component from one source register (possibly with
971 * negation).
972 */
973 uint8_t components[4] = { 0 };
974 bool negate[4] = { false };
975 ir_variable *var = NULL;
976
977 for (unsigned i = 0; i < ir->type->vector_elements; i++) {
978 ir_rvalue *op = ir->operands[i];
979
980 assert(op->type->is_scalar());
981
982 while (op != NULL) {
983 switch (op->ir_type) {
984 case ir_type_constant: {
985
986 assert(op->type->is_scalar());
987
988 const ir_constant *const c = op->as_constant();
989 if (c->is_one()) {
990 components[i] = SWIZZLE_ONE;
991 } else if (c->is_zero()) {
992 components[i] = SWIZZLE_ZERO;
993 } else if (c->is_negative_one()) {
994 components[i] = SWIZZLE_ONE;
995 negate[i] = true;
996 } else {
997 assert(!"SWZ constant must be 0.0 or 1.0.");
998 }
999
1000 op = NULL;
1001 break;
1002 }
1003
1004 case ir_type_dereference_variable: {
1005 ir_dereference_variable *const deref =
1006 (ir_dereference_variable *) op;
1007
1008 assert((var == NULL) || (deref->var == var));
1009 components[i] = SWIZZLE_X;
1010 var = deref->var;
1011 op = NULL;
1012 break;
1013 }
1014
1015 case ir_type_expression: {
1016 ir_expression *const expr = (ir_expression *) op;
1017
1018 assert(expr->operation == ir_unop_neg);
1019 negate[i] = true;
1020
1021 op = expr->operands[0];
1022 break;
1023 }
1024
1025 case ir_type_swizzle: {
1026 ir_swizzle *const swiz = (ir_swizzle *) op;
1027
1028 components[i] = swiz->mask.x;
1029 op = swiz->val;
1030 break;
1031 }
1032
1033 default:
1034 assert(!"Should not get here.");
1035 return;
1036 }
1037 }
1038 }
1039
1040 assert(var != NULL);
1041
1042 ir_dereference_variable *const deref =
1043 new(mem_ctx) ir_dereference_variable(var);
1044
1045 this->result.file = PROGRAM_UNDEFINED;
1046 deref->accept(this);
1047 if (this->result.file == PROGRAM_UNDEFINED) {
1048 ir_print_visitor v;
1049 printf("Failed to get tree for expression operand:\n");
1050 deref->accept(&v);
1051 exit(1);
1052 }
1053
1054 src_reg src;
1055
1056 src = this->result;
1057 src.swizzle = MAKE_SWIZZLE4(components[0],
1058 components[1],
1059 components[2],
1060 components[3]);
1061 src.negate = ((unsigned(negate[0]) << 0)
1062 | (unsigned(negate[1]) << 1)
1063 | (unsigned(negate[2]) << 2)
1064 | (unsigned(negate[3]) << 3));
1065
1066 /* Storage for our result. Ideally for an assignment we'd be using the
1067 * actual storage for the result here, instead.
1068 */
1069 const src_reg result_src = get_temp(ir->type);
1070 dst_reg result_dst = dst_reg(result_src);
1071
1072 /* Limit writes to the channels that will be used by result_src later.
1073 * This does limit this temp's use as a temporary for multi-instruction
1074 * sequences.
1075 */
1076 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1077
1078 emit(ir, OPCODE_SWZ, result_dst, src);
1079 this->result = result_src;
1080 }
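/* As a concrete example, for a scalar float f the quadop-vector expression
 * vec4(f, -f, 0.0, 1.0) becomes a single
 *
 *    SWZ result, f, x, -x, 0, 1;
 *
 * i.e. component selects (one negated) plus the immediate ZERO and ONE
 * swizzle values, with no extra MOVs.
 */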
1081
1082 void
1083 ir_to_mesa_visitor::visit(ir_expression *ir)
1084 {
1085 unsigned int operand;
1086 src_reg op[Elements(ir->operands)];
1087 src_reg result_src;
1088 dst_reg result_dst;
1089
1090 /* Quick peephole: Emit OPCODE_MAD(a, b, c) instead of ADD(MUL(a, b), c)
1091 */
1092 if (ir->operation == ir_binop_add) {
1093 if (try_emit_mad(ir, 1))
1094 return;
1095 if (try_emit_mad(ir, 0))
1096 return;
1097 }
1098 if (try_emit_sat(ir))
1099 return;
1100
1101 if (ir->operation == ir_quadop_vector) {
1102 this->emit_swz(ir);
1103 return;
1104 }
1105
1106 for (operand = 0; operand < ir->get_num_operands(); operand++) {
1107 this->result.file = PROGRAM_UNDEFINED;
1108 ir->operands[operand]->accept(this);
1109 if (this->result.file == PROGRAM_UNDEFINED) {
1110 ir_print_visitor v;
1111 printf("Failed to get tree for expression operand:\n");
1112 ir->operands[operand]->accept(&v);
1113 exit(1);
1114 }
1115 op[operand] = this->result;
1116
1117 /* Matrix expression operands should have been broken down to vector
1118 * operations already.
1119 */
1120 assert(!ir->operands[operand]->type->is_matrix());
1121 }
1122
1123 int vector_elements = ir->operands[0]->type->vector_elements;
1124 if (ir->operands[1]) {
1125 vector_elements = MAX2(vector_elements,
1126 ir->operands[1]->type->vector_elements);
1127 }
1128
1129 this->result.file = PROGRAM_UNDEFINED;
1130
1131 /* Storage for our result. Ideally for an assignment we'd be using
1132 * the actual storage for the result here, instead.
1133 */
1134 result_src = get_temp(ir->type);
1135 /* convenience for the emit functions below. */
1136 result_dst = dst_reg(result_src);
1137 /* Limit writes to the channels that will be used by result_src later.
1138 * This does limit this temp's use as a temporary for multi-instruction
1139 * sequences.
1140 */
1141 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1142
1143 switch (ir->operation) {
1144 case ir_unop_logic_not:
1145 emit(ir, OPCODE_SEQ, result_dst, op[0], src_reg_for_float(0.0));
1146 break;
1147 case ir_unop_neg:
1148 op[0].negate = ~op[0].negate;
1149 result_src = op[0];
1150 break;
1151 case ir_unop_abs:
1152 emit(ir, OPCODE_ABS, result_dst, op[0]);
1153 break;
1154 case ir_unop_sign:
1155 emit(ir, OPCODE_SSG, result_dst, op[0]);
1156 break;
1157 case ir_unop_rcp:
1158 emit_scalar(ir, OPCODE_RCP, result_dst, op[0]);
1159 break;
1160
1161 case ir_unop_exp2:
1162 emit_scalar(ir, OPCODE_EX2, result_dst, op[0]);
1163 break;
1164 case ir_unop_exp:
1165 case ir_unop_log:
1166 assert(!"not reached: should be handled by ir_explog_to_explog2");
1167 break;
1168 case ir_unop_log2:
1169 emit_scalar(ir, OPCODE_LG2, result_dst, op[0]);
1170 break;
1171 case ir_unop_sin:
1172 emit_scalar(ir, OPCODE_SIN, result_dst, op[0]);
1173 break;
1174 case ir_unop_cos:
1175 emit_scalar(ir, OPCODE_COS, result_dst, op[0]);
1176 break;
1177 case ir_unop_sin_reduced:
1178 emit_scs(ir, OPCODE_SIN, result_dst, op[0]);
1179 break;
1180 case ir_unop_cos_reduced:
1181 emit_scs(ir, OPCODE_COS, result_dst, op[0]);
1182 break;
1183
1184 case ir_unop_dFdx:
1185 emit(ir, OPCODE_DDX, result_dst, op[0]);
1186 break;
1187 case ir_unop_dFdy:
1188 emit(ir, OPCODE_DDY, result_dst, op[0]);
1189 break;
1190
1191 case ir_unop_noise: {
1192 const enum prog_opcode opcode =
1193 prog_opcode(OPCODE_NOISE1
1194 + (ir->operands[0]->type->vector_elements) - 1);
1195 assert((opcode >= OPCODE_NOISE1) && (opcode <= OPCODE_NOISE4));
1196
1197 emit(ir, opcode, result_dst, op[0]);
1198 break;
1199 }
1200
1201 case ir_binop_add:
1202 emit(ir, OPCODE_ADD, result_dst, op[0], op[1]);
1203 break;
1204 case ir_binop_sub:
1205 emit(ir, OPCODE_SUB, result_dst, op[0], op[1]);
1206 break;
1207
1208 case ir_binop_mul:
1209 emit(ir, OPCODE_MUL, result_dst, op[0], op[1]);
1210 break;
1211 case ir_binop_div:
1212 assert(!"not reached: should be handled by ir_div_to_mul_rcp");
1213 case ir_binop_mod:
1214 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1215 break;
1216
1217 case ir_binop_less:
1218 emit(ir, OPCODE_SLT, result_dst, op[0], op[1]);
1219 break;
1220 case ir_binop_greater:
1221 emit(ir, OPCODE_SGT, result_dst, op[0], op[1]);
1222 break;
1223 case ir_binop_lequal:
1224 emit(ir, OPCODE_SLE, result_dst, op[0], op[1]);
1225 break;
1226 case ir_binop_gequal:
1227 emit(ir, OPCODE_SGE, result_dst, op[0], op[1]);
1228 break;
1229 case ir_binop_equal:
1230 emit(ir, OPCODE_SEQ, result_dst, op[0], op[1]);
1231 break;
1232 case ir_binop_nequal:
1233 emit(ir, OPCODE_SNE, result_dst, op[0], op[1]);
1234 break;
1235 case ir_binop_all_equal:
1236 /* "==" operator producing a scalar boolean. */
1237 if (ir->operands[0]->type->is_vector() ||
1238 ir->operands[1]->type->is_vector()) {
1239 src_reg temp = get_temp(glsl_type::vec4_type);
1240 emit(ir, OPCODE_SNE, dst_reg(temp), op[0], op[1]);
1241 emit_dp(ir, result_dst, temp, temp, vector_elements);
1242 emit(ir, OPCODE_SEQ, result_dst, result_src, src_reg_for_float(0.0));
1243 } else {
1244 emit(ir, OPCODE_SEQ, result_dst, op[0], op[1]);
1245 }
1246 break;
1247 case ir_binop_any_nequal:
1248 /* "!=" operator producing a scalar boolean. */
1249 if (ir->operands[0]->type->is_vector() ||
1250 ir->operands[1]->type->is_vector()) {
1251 src_reg temp = get_temp(glsl_type::vec4_type);
1252 emit(ir, OPCODE_SNE, dst_reg(temp), op[0], op[1]);
1253 emit_dp(ir, result_dst, temp, temp, vector_elements);
1254 emit(ir, OPCODE_SNE, result_dst, result_src, src_reg_for_float(0.0));
1255 } else {
1256 emit(ir, OPCODE_SNE, result_dst, op[0], op[1]);
1257 }
1258 break;
1259
1260 case ir_unop_any:
1261 assert(ir->operands[0]->type->is_vector());
1262 emit_dp(ir, result_dst, op[0], op[0],
1263 ir->operands[0]->type->vector_elements);
1264 emit(ir, OPCODE_SNE, result_dst, result_src, src_reg_for_float(0.0));
1265 break;
1266
1267 case ir_binop_logic_xor:
1268 emit(ir, OPCODE_SNE, result_dst, op[0], op[1]);
1269 break;
1270
1271 case ir_binop_logic_or:
1272 /* This could be a saturated add and skip the SNE. */
1273 emit(ir, OPCODE_ADD, result_dst, op[0], op[1]);
1274 emit(ir, OPCODE_SNE, result_dst, result_src, src_reg_for_float(0.0));
1275 break;
1276
1277 case ir_binop_logic_and:
1278 /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
1279 emit(ir, OPCODE_MUL, result_dst, op[0], op[1]);
1280 break;
1281
1282 case ir_binop_dot:
1283 assert(ir->operands[0]->type->is_vector());
1284 assert(ir->operands[0]->type == ir->operands[1]->type);
1285 emit_dp(ir, result_dst, op[0], op[1],
1286 ir->operands[0]->type->vector_elements);
1287 break;
1288
1289 case ir_unop_sqrt:
1290 /* sqrt(x) = x * rsq(x). */
1291 emit_scalar(ir, OPCODE_RSQ, result_dst, op[0]);
1292 emit(ir, OPCODE_MUL, result_dst, result_src, op[0]);
1293 /* For incoming channels <= 0, set the result to 0. */
1294 op[0].negate = ~op[0].negate;
1295 emit(ir, OPCODE_CMP, result_dst,
1296 op[0], result_src, src_reg_for_float(0.0));
1297 break;
1298 case ir_unop_rsq:
1299 emit_scalar(ir, OPCODE_RSQ, result_dst, op[0]);
1300 break;
1301 case ir_unop_i2f:
1302 case ir_unop_b2f:
1303 case ir_unop_b2i:
1304 /* Mesa IR lacks types; ints are stored as truncated floats. */
1305 result_src = op[0];
1306 break;
1307 case ir_unop_f2i:
1308 emit(ir, OPCODE_TRUNC, result_dst, op[0]);
1309 break;
1310 case ir_unop_f2b:
1311 case ir_unop_i2b:
1312 emit(ir, OPCODE_SNE, result_dst,
1313 op[0], src_reg_for_float(0.0));
1314 break;
1315 case ir_unop_trunc:
1316 emit(ir, OPCODE_TRUNC, result_dst, op[0]);
1317 break;
1318 case ir_unop_ceil:
1319 op[0].negate = ~op[0].negate;
1320 emit(ir, OPCODE_FLR, result_dst, op[0]);
1321 result_src.negate = ~result_src.negate;
1322 break;
1323 case ir_unop_floor:
1324 emit(ir, OPCODE_FLR, result_dst, op[0]);
1325 break;
1326 case ir_unop_fract:
1327 emit(ir, OPCODE_FRC, result_dst, op[0]);
1328 break;
1329
1330 case ir_binop_min:
1331 emit(ir, OPCODE_MIN, result_dst, op[0], op[1]);
1332 break;
1333 case ir_binop_max:
1334 emit(ir, OPCODE_MAX, result_dst, op[0], op[1]);
1335 break;
1336 case ir_binop_pow:
1337 emit_scalar(ir, OPCODE_POW, result_dst, op[0], op[1]);
1338 break;
1339
1340 case ir_unop_bit_not:
1341 case ir_unop_u2f:
1342 case ir_binop_lshift:
1343 case ir_binop_rshift:
1344 case ir_binop_bit_and:
1345 case ir_binop_bit_xor:
1346 case ir_binop_bit_or:
1347 case ir_unop_round_even:
1348 assert(!"GLSL 1.30 features unsupported");
1349 break;
1350
1351 case ir_quadop_vector:
1352 /* This operation should have already been handled.
1353 */
1354 assert(!"Should not get here.");
1355 break;
1356 }
1357
1358 this->result = result_src;
1359 }
1360
1361
1362 void
1363 ir_to_mesa_visitor::visit(ir_swizzle *ir)
1364 {
1365 src_reg src;
1366 int i;
1367 int swizzle[4];
1368
1369 /* Note that this is only swizzles in expressions, not those on the left
1370 * hand side of an assignment, which do write masking. See ir_assignment
1371 * for that.
1372 */
1373
1374 ir->val->accept(this);
1375 src = this->result;
1376 assert(src.file != PROGRAM_UNDEFINED);
1377
1378 for (i = 0; i < 4; i++) {
1379 if (i < ir->type->vector_elements) {
1380 switch (i) {
1381 case 0:
1382 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
1383 break;
1384 case 1:
1385 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
1386 break;
1387 case 2:
1388 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
1389 break;
1390 case 3:
1391 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
1392 break;
1393 }
1394 } else {
1395 /* If the type is smaller than a vec4, replicate the last
1396 * channel out.
1397 */
1398 swizzle[i] = swizzle[ir->type->vector_elements - 1];
1399 }
1400 }
1401
1402 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1403
1404 this->result = src;
1405 }
1406
1407 void
1408 ir_to_mesa_visitor::visit(ir_dereference_variable *ir)
1409 {
1410 variable_storage *entry = find_variable_storage(ir->var);
1411 ir_variable *var = ir->var;
1412
1413 if (!entry) {
1414 switch (var->mode) {
1415 case ir_var_uniform:
1416 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
1417 var->location);
1418 this->variables.push_tail(entry);
1419 break;
1420 case ir_var_in:
1421 case ir_var_inout:
1422 /* The linker assigns locations for varyings and attributes,
1423 * including deprecated builtins (like gl_Color), user-assigned
1424 * generic attributes (glBindAttribLocation), and
1425 * user-defined varyings.
1426 *
1427 * FINISHME: We would hit this path for function arguments. Fix!
1428 */
1429 assert(var->location != -1);
1430 entry = new(mem_ctx) variable_storage(var,
1431 PROGRAM_INPUT,
1432 var->location);
1433 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
1434 var->location >= VERT_ATTRIB_GENERIC0) {
1435 _mesa_add_attribute(this->prog->Attributes,
1436 var->name,
1437 _mesa_sizeof_glsl_type(var->type->gl_type),
1438 var->type->gl_type,
1439 var->location - VERT_ATTRIB_GENERIC0);
1440 }
1441 break;
1442 case ir_var_out:
1443 assert(var->location != -1);
1444 entry = new(mem_ctx) variable_storage(var,
1445 PROGRAM_OUTPUT,
1446 var->location);
1447 break;
1448 case ir_var_system_value:
1449 entry = new(mem_ctx) variable_storage(var,
1450 PROGRAM_SYSTEM_VALUE,
1451 var->location);
1452 break;
1453 case ir_var_auto:
1454 case ir_var_temporary:
1455 entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY,
1456 this->next_temp);
1457 this->variables.push_tail(entry);
1458
1459 next_temp += type_size(var->type);
1460 break;
1461 }
1462
1463 if (!entry) {
1464 printf("Failed to make storage for %s\n", var->name);
1465 exit(1);
1466 }
1467 }
1468
1469 this->result = src_reg(entry->file, entry->index, var->type);
1470 }
1471
1472 void
1473 ir_to_mesa_visitor::visit(ir_dereference_array *ir)
1474 {
1475 ir_constant *index;
1476 src_reg src;
1477 int element_size = type_size(ir->type);
1478
1479 index = ir->array_index->constant_expression_value();
1480
1481 ir->array->accept(this);
1482 src = this->result;
1483
1484 if (index) {
1485 src.index += index->value.i[0] * element_size;
1486 } else {
1487 src_reg array_base = this->result;
1488 /* Variable index array dereference. It eats the "vec4" of the
1489 * base of the array and an index that offsets the Mesa register
1490 * index.
1491 */
1492 ir->array_index->accept(this);
1493
1494 src_reg index_reg;
1495
1496 if (element_size == 1) {
1497 index_reg = this->result;
1498 } else {
1499 index_reg = get_temp(glsl_type::float_type);
1500
1501 emit(ir, OPCODE_MUL, dst_reg(index_reg),
1502 this->result, src_reg_for_float(element_size));
1503 }
1504
1505 src.reladdr = ralloc(mem_ctx, src_reg);
1506 memcpy(src.reladdr, &index_reg, sizeof(index_reg));
1507 }
1508
1509 /* If the type is smaller than a vec4, replicate the last channel out. */
1510 if (ir->type->is_scalar() || ir->type->is_vector())
1511 src.swizzle = swizzle_for_size(ir->type->vector_elements);
1512 else
1513 src.swizzle = SWIZZLE_NOOP;
1514
1515 this->result = src;
1516 }
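/* Example: for mat4 m[4], the element type of m[i] occupies 4 vec4 slots,
 * so the index is scaled first (MUL tmp, i, 4.0) and attached as
 * src.reladdr; the ARL that actually loads the address register is emitted
 * later, when an instruction consumes this source. A constant index such
 * as m[2] instead folds into a fixed offset of 8 slots with no relative
 * addressing at all.
 */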
1517
1518 void
1519 ir_to_mesa_visitor::visit(ir_dereference_record *ir)
1520 {
1521 unsigned int i;
1522 const glsl_type *struct_type = ir->record->type;
1523 int offset = 0;
1524
1525 ir->record->accept(this);
1526
1527 for (i = 0; i < struct_type->length; i++) {
1528 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1529 break;
1530 offset += type_size(struct_type->fields.structure[i].type);
1531 }
1532
1533 /* If the type is smaller than a vec4, replicate the last channel out. */
1534 if (ir->type->is_scalar() || ir->type->is_vector())
1535 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1536 else
1537 this->result.swizzle = SWIZZLE_NOOP;
1538
1539 this->result.index += offset;
1540 }
1541
1542 /**
1543 * We want to be careful in assignment setup to hit the actual storage
1544 * instead of potentially using a temporary like we might with the
1545 * ir_dereference handler.
1546 */
1547 static dst_reg
1548 get_assignment_lhs(ir_dereference *ir, ir_to_mesa_visitor *v)
1549 {
1550 /* The LHS must be a dereference. If the LHS is a variable indexed array
1551 * access of a vector, it must be separated into a series of conditional moves
1552 * before reaching this point (see ir_vec_index_to_cond_assign).
1553 */
1554 assert(ir->as_dereference());
1555 ir_dereference_array *deref_array = ir->as_dereference_array();
1556 if (deref_array) {
1557 assert(!deref_array->array->type->is_vector());
1558 }
1559
1560 /* Use the rvalue deref handler for the most part. We'll ignore
1561 * swizzles in it and write swizzles using writemask, though.
1562 */
1563 ir->accept(v);
1564 return dst_reg(v->result);
1565 }
1566
1567 /**
1568 * Process the condition of a conditional assignment
1569 *
1570 * Examines the condition of a conditional assignment to generate the optimal
1571 * first operand of a \c CMP instruction. If the condition is a relational
1572 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
1573 * used as the source for the \c CMP instruction. Otherwise the comparison
1574 * is processed to a boolean result, and the boolean result is used as the
1575 * operand to the CMP instruction.
1576 */
1577 bool
1578 ir_to_mesa_visitor::process_move_condition(ir_rvalue *ir)
1579 {
1580 ir_rvalue *src_ir = ir;
1581 bool negate = true;
1582 bool switch_order = false;
1583
1584 ir_expression *const expr = ir->as_expression();
1585 if ((expr != NULL) && (expr->get_num_operands() == 2)) {
1586 bool zero_on_left = false;
1587
1588 if (expr->operands[0]->is_zero()) {
1589 src_ir = expr->operands[1];
1590 zero_on_left = true;
1591 } else if (expr->operands[1]->is_zero()) {
1592 src_ir = expr->operands[0];
1593 zero_on_left = false;
1594 }
1595
1596 /* a is - 0 + - 0 +
1597 * (a < 0) T F F ( a < 0) T F F
1598 * (0 < a) F F T (-a < 0) F F T
1599 * (a <= 0) T T F (-a < 0) F F T (swap order of other operands)
1600 * (0 <= a) F T T ( a < 0) T F F (swap order of other operands)
1601 * (a > 0) F F T (-a < 0) F F T
1602 * (0 > a) T F F ( a < 0) T F F
1603 * (a >= 0) F T T ( a < 0) T F F (swap order of other operands)
1604 * (0 >= a) T T F (-a < 0) F F T (swap order of other operands)
1605 *
1606 * Note that exchanging the order of 0 and 'a' in the comparison simply
1607 * means that the value of 'a' should be negated.
1608 */
1609 if (src_ir != ir) {
1610 switch (expr->operation) {
1611 case ir_binop_less:
1612 switch_order = false;
1613 negate = zero_on_left;
1614 break;
1615
1616 case ir_binop_greater:
1617 switch_order = false;
1618 negate = !zero_on_left;
1619 break;
1620
1621 case ir_binop_lequal:
1622 switch_order = true;
1623 negate = !zero_on_left;
1624 break;
1625
1626 case ir_binop_gequal:
1627 switch_order = true;
1628 negate = zero_on_left;
1629 break;
1630
1631 default:
1632 /* This isn't the right kind of comparison after all, so make sure
1633 * the whole condition is visited.
1634 */
1635 src_ir = ir;
1636 break;
1637 }
1638 }
1639 }
1640
1641 src_ir->accept(this);
1642
1643 /* We use the OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
1644 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
1645 * choose which value OPCODE_CMP produces without an extra instruction
1646 * computing the condition.
1647 */
1648 if (negate)
1649 this->result.negate = ~this->result.negate;
1650
1651 return switch_order;
1652 }
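/* For example, when an assignment's condition is the expression (a > 0.0),
 * it reaches the CMP emission below as "a" itself (negated per the table
 * above), so no separate comparison instruction is needed; a condition that
 * is not a comparison against zero is first evaluated to its 0.0/1.0
 * boolean value, and that value is negated instead.
 */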
1653
1654 void
1655 ir_to_mesa_visitor::visit(ir_assignment *ir)
1656 {
1657 dst_reg l;
1658 src_reg r;
1659 int i;
1660
1661 ir->rhs->accept(this);
1662 r = this->result;
1663
1664 l = get_assignment_lhs(ir->lhs, this);
1665
1666 /* FINISHME: This should really set to the correct maximal writemask for each
1667 * FINISHME: component written (in the loops below). This case can only
1668 * FINISHME: occur for matrices, arrays, and structures.
1669 */
1670 if (ir->write_mask == 0) {
1671 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
1672 l.writemask = WRITEMASK_XYZW;
1673 } else if (ir->lhs->type->is_scalar()) {
1674 /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
1675 * FINISHME: W component of fragment shader output zero, work correctly.
1676 */
1677 l.writemask = WRITEMASK_XYZW;
1678 } else {
1679 int swizzles[4];
1680 int first_enabled_chan = 0;
1681 int rhs_chan = 0;
1682
1683 assert(ir->lhs->type->is_vector());
1684 l.writemask = ir->write_mask;
1685
1686 for (int i = 0; i < 4; i++) {
1687 if (l.writemask & (1 << i)) {
1688 first_enabled_chan = GET_SWZ(r.swizzle, i);
1689 break;
1690 }
1691 }
1692
1693 /* Swizzle a small RHS vector into the channels being written.
1694 *
1695 * glsl ir treats write_mask as dictating how many channels are
1696 * present on the RHS while Mesa IR treats write_mask as just
1697 * showing which channels of the vec4 RHS get written.
1698 */
1699 for (int i = 0; i < 4; i++) {
1700 if (l.writemask & (1 << i))
1701 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
1702 else
1703 swizzles[i] = first_enabled_chan;
1704 }
1705 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
1706 swizzles[2], swizzles[3]);
1707 }
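      /* Worked example: a vec2 RHS (carried with its usual .xyyy swizzle)
       * written to the .yz channels of a vec4 LHS produces writemask YZ and
       * an RHS swizzle of .yxyy, so destination channel y reads the RHS's
       * first component and channel z reads its second.
       */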
1708
1709 assert(l.file != PROGRAM_UNDEFINED);
1710 assert(r.file != PROGRAM_UNDEFINED);
1711
1712 if (ir->condition) {
1713 const bool switch_order = this->process_move_condition(ir->condition);
1714 src_reg condition = this->result;
1715
1716 for (i = 0; i < type_size(ir->lhs->type); i++) {
1717 if (switch_order) {
1718 emit(ir, OPCODE_CMP, l, condition, src_reg(l), r);
1719 } else {
1720 emit(ir, OPCODE_CMP, l, condition, r, src_reg(l));
1721 }
1722
1723 l.index++;
1724 r.index++;
1725 }
1726 } else {
1727 for (i = 0; i < type_size(ir->lhs->type); i++) {
1728 emit(ir, OPCODE_MOV, l, r);
1729 l.index++;
1730 r.index++;
1731 }
1732 }
1733 }
1734
1735
1736 void
1737 ir_to_mesa_visitor::visit(ir_constant *ir)
1738 {
1739 src_reg src;
1740 GLfloat stack_vals[4] = { 0 };
1741 GLfloat *values = stack_vals;
1742 unsigned int i;
1743
1744 /* Unfortunately, 4 floats is all we can get into
1745 * _mesa_add_unnamed_constant. So, make a temp to store an
1746 * aggregate constant and move each constant value into it. If we
1747 * get lucky, copy propagation will eliminate the extra moves.
1748 */
1749
1750 if (ir->type->base_type == GLSL_TYPE_STRUCT) {
1751 src_reg temp_base = get_temp(ir->type);
1752 dst_reg temp = dst_reg(temp_base);
1753
1754 foreach_iter(exec_list_iterator, iter, ir->components) {
1755 ir_constant *field_value = (ir_constant *)iter.get();
1756 int size = type_size(field_value->type);
1757
1758 assert(size > 0);
1759
1760 field_value->accept(this);
1761 src = this->result;
1762
1763 for (i = 0; i < (unsigned int)size; i++) {
1764 emit(ir, OPCODE_MOV, temp, src);
1765
1766 src.index++;
1767 temp.index++;
1768 }
1769 }
1770 this->result = temp_base;
1771 return;
1772 }
1773
1774 if (ir->type->is_array()) {
1775 src_reg temp_base = get_temp(ir->type);
1776 dst_reg temp = dst_reg(temp_base);
1777 int size = type_size(ir->type->fields.array);
1778
1779 assert(size > 0);
1780
1781 for (i = 0; i < ir->type->length; i++) {
1782 ir->array_elements[i]->accept(this);
1783 src = this->result;
1784 for (int j = 0; j < size; j++) {
1785 emit(ir, OPCODE_MOV, temp, src);
1786
1787 src.index++;
1788 temp.index++;
1789 }
1790 }
1791 this->result = temp_base;
1792 return;
1793 }
1794
1795 if (ir->type->is_matrix()) {
1796 src_reg mat = get_temp(ir->type);
1797 dst_reg mat_column = dst_reg(mat);
1798
1799 for (i = 0; i < ir->type->matrix_columns; i++) {
1800 assert(ir->type->base_type == GLSL_TYPE_FLOAT);
1801 values = &ir->value.f[i * ir->type->vector_elements];
1802
1803 src = src_reg(PROGRAM_CONSTANT, -1, NULL);
1804 src.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1805 values,
1806 ir->type->vector_elements,
1807 &src.swizzle);
1808 emit(ir, OPCODE_MOV, mat_column, src);
1809
1810 mat_column.index++;
1811 }
1812
1813 this->result = mat;
1814 return;
1815 }
1816
1817 src.file = PROGRAM_CONSTANT;
1818 switch (ir->type->base_type) {
1819 case GLSL_TYPE_FLOAT:
1820 values = &ir->value.f[0];
1821 break;
1822 case GLSL_TYPE_UINT:
1823 for (i = 0; i < ir->type->vector_elements; i++) {
1824 values[i] = ir->value.u[i];
1825 }
1826 break;
1827 case GLSL_TYPE_INT:
1828 for (i = 0; i < ir->type->vector_elements; i++) {
1829 values[i] = ir->value.i[i];
1830 }
1831 break;
1832 case GLSL_TYPE_BOOL:
1833 for (i = 0; i < ir->type->vector_elements; i++) {
1834 values[i] = ir->value.b[i];
1835 }
1836 break;
1837 default:
1838 assert(!"Non-float/uint/int/bool constant");
1839 }
1840
1841 this->result = src_reg(PROGRAM_CONSTANT, -1, ir->type);
1842 this->result.index = _mesa_add_unnamed_constant(this->prog->Parameters,
1843 values,
1844 ir->type->vector_elements,
1845 &this->result.swizzle);
1846 }
1847
1848 function_entry *
1849 ir_to_mesa_visitor::get_function_signature(ir_function_signature *sig)
1850 {
1851 function_entry *entry;
1852
1853 foreach_iter(exec_list_iterator, iter, this->function_signatures) {
1854 entry = (function_entry *)iter.get();
1855
1856 if (entry->sig == sig)
1857 return entry;
1858 }
1859
1860 entry = ralloc(mem_ctx, function_entry);
1861 entry->sig = sig;
1862 entry->sig_id = this->next_signature_id++;
1863 entry->bgn_inst = NULL;
1864
1865 /* Allocate storage for all the parameters. */
1866 foreach_iter(exec_list_iterator, iter, sig->parameters) {
1867 ir_variable *param = (ir_variable *)iter.get();
1868 variable_storage *storage;
1869
1870 storage = find_variable_storage(param);
1871 assert(!storage);
1872
1873 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
1874 this->next_temp);
1875 this->variables.push_tail(storage);
1876
1877 this->next_temp += type_size(param->type);
1878 }
1879
1880 if (!sig->return_type->is_void()) {
1881 entry->return_reg = get_temp(sig->return_type);
1882 } else {
1883 entry->return_reg = ir_to_mesa_undef;
1884 }
1885
1886 this->function_signatures.push_tail(entry);
1887 return entry;
1888 }
1889
1890 void
1891 ir_to_mesa_visitor::visit(ir_call *ir)
1892 {
1893 ir_to_mesa_instruction *call_inst;
1894 ir_function_signature *sig = ir->get_callee();
1895 function_entry *entry = get_function_signature(sig);
1896 int i;
1897
1898 /* Process in parameters. */
1899 exec_list_iterator sig_iter = sig->parameters.iterator();
1900 foreach_iter(exec_list_iterator, iter, *ir) {
1901 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1902 ir_variable *param = (ir_variable *)sig_iter.get();
1903
1904 if (param->mode == ir_var_in ||
1905 param->mode == ir_var_inout) {
1906 variable_storage *storage = find_variable_storage(param);
1907 assert(storage);
1908
1909 param_rval->accept(this);
1910 src_reg r = this->result;
1911
1912 dst_reg l;
1913 l.file = storage->file;
1914 l.index = storage->index;
1915 l.reladdr = NULL;
1916 l.writemask = WRITEMASK_XYZW;
1917 l.cond_mask = COND_TR;
1918
1919 for (i = 0; i < type_size(param->type); i++) {
1920 emit(ir, OPCODE_MOV, l, r);
1921 l.index++;
1922 r.index++;
1923 }
1924 }
1925
1926 sig_iter.next();
1927 }
1928 assert(!sig_iter.has_next());
1929
1930 /* Emit call instruction */
1931 call_inst = emit(ir, OPCODE_CAL, ir_to_mesa_undef_dst, ir_to_mesa_undef);
1932 call_inst->function = entry;
1933
1934 /* Process out parameters. */
1935 sig_iter = sig->parameters.iterator();
1936 foreach_iter(exec_list_iterator, iter, *ir) {
1937 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
1938 ir_variable *param = (ir_variable *)sig_iter.get();
1939
1940 if (param->mode == ir_var_out ||
1941 param->mode == ir_var_inout) {
1942 variable_storage *storage = find_variable_storage(param);
1943 assert(storage);
1944
1945 src_reg r;
1946 r.file = storage->file;
1947 r.index = storage->index;
1948 r.reladdr = NULL;
1949 r.swizzle = SWIZZLE_NOOP;
1950 r.negate = 0;
1951
1952 param_rval->accept(this);
1953 dst_reg l = dst_reg(this->result);
1954
1955 for (i = 0; i < type_size(param->type); i++) {
1956 emit(ir, OPCODE_MOV, l, r);
1957 l.index++;
1958 r.index++;
1959 }
1960 }
1961
1962 sig_iter.next();
1963 }
1964 assert(!sig_iter.has_next());
1965
1966 /* Process return value. */
1967 this->result = entry->return_reg;
1968 }
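/* In other words, a call is lowered to plain register traffic: MOVs copy
 * each in/inout argument into the callee's preallocated temporaries, a CAL
 * (whose branch target is patched later) references the function_entry,
 * MOVs copy out/inout parameters back, and the callee's return_reg becomes
 * this visitor's result.
 */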
1969
1970 void
1971 ir_to_mesa_visitor::visit(ir_texture *ir)
1972 {
1973 src_reg result_src, coord, lod_info, projector;
1974 dst_reg result_dst, coord_dst;
1975 ir_to_mesa_instruction *inst = NULL;
1976 prog_opcode opcode = OPCODE_NOP;
1977
1978 ir->coordinate->accept(this);
1979
1980 /* Put our coords in a temp.  We'll need to modify them for shadow,
1981 * projection, or LOD, so the only case we'd use them as-is is plain
1982 * old texturing.  Mesa IR optimization should handle cleaning up
1983 * our mess in that case.
1984 */
1985 coord = get_temp(glsl_type::vec4_type);
1986 coord_dst = dst_reg(coord);
1987 emit(ir, OPCODE_MOV, coord_dst, this->result);
1988
1989 if (ir->projector) {
1990 ir->projector->accept(this);
1991 projector = this->result;
1992 }
1993
1994 /* Storage for our result.  Ideally, for an assignment we'd use
1995 * the actual storage for the result here instead.
1996 */
1997 result_src = get_temp(glsl_type::vec4_type);
1998 result_dst = dst_reg(result_src);
1999
2000 switch (ir->op) {
2001 case ir_tex:
2002 opcode = OPCODE_TEX;
2003 break;
2004 case ir_txb:
2005 opcode = OPCODE_TXB;
2006 ir->lod_info.bias->accept(this);
2007 lod_info = this->result;
2008 break;
2009 case ir_txl:
2010 opcode = OPCODE_TXL;
2011 ir->lod_info.lod->accept(this);
2012 lod_info = this->result;
2013 break;
2014 case ir_txd:
2015 case ir_txf:
2016 assert(!"GLSL 1.30 features unsupported");
2017 break;
2018 }
2019
2020 if (ir->projector) {
2021 if (opcode == OPCODE_TEX) {
2022 /* Slot the projector in as the last component of the coord. */
2023 coord_dst.writemask = WRITEMASK_W;
2024 emit(ir, OPCODE_MOV, coord_dst, projector);
2025 coord_dst.writemask = WRITEMASK_XYZW;
2026 opcode = OPCODE_TXP;
2027 } else {
2028 src_reg coord_w = coord;
2029 coord_w.swizzle = SWIZZLE_WWWW;
2030
2031 /* For the other TEX opcodes there's no projective version
2032 * since the last slot is taken up by lod info. Do the
2033 * projective divide now.
2034 */
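/* A rough sketch of the Mesa IR this path emits (non-shadow case;
 * register names are illustrative only):
 *
 *   RCP coord.w, projector;
 *   MUL coord.xyz, coord, coord.wwww;
 *
 * followed by the TXB/TXL below, with the lod info moved into coord.w.
 */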
2035 coord_dst.writemask = WRITEMASK_W;
2036 emit(ir, OPCODE_RCP, coord_dst, projector);
2037
2038 /* In the case where we have to project the coordinates "by hand,"
2039 * the shadow comparitor value must also be projected.
2040 */
2041 src_reg tmp_src = coord;
2042 if (ir->shadow_comparitor) {
2043 /* Slot the shadow value in as the second to last component of the
2044 * coord.
2045 */
2046 ir->shadow_comparitor->accept(this);
2047
2048 tmp_src = get_temp(glsl_type::vec4_type);
2049 dst_reg tmp_dst = dst_reg(tmp_src);
2050
2051 tmp_dst.writemask = WRITEMASK_Z;
2052 emit(ir, OPCODE_MOV, tmp_dst, this->result);
2053
2054 tmp_dst.writemask = WRITEMASK_XY;
2055 emit(ir, OPCODE_MOV, tmp_dst, coord);
2056 }
2057
2058 coord_dst.writemask = WRITEMASK_XYZ;
2059 emit(ir, OPCODE_MUL, coord_dst, tmp_src, coord_w);
2060
2061 coord_dst.writemask = WRITEMASK_XYZW;
2062 coord.swizzle = SWIZZLE_XYZW;
2063 }
2064 }
2065
2066 /* If projection is done and the opcode is not OPCODE_TXP, then the shadow
2067 * comparitor was put in the correct place (and projected) by the
2068 * by-hand projection code above.
2069 */
2070 if (ir->shadow_comparitor && (!ir->projector || opcode == OPCODE_TXP)) {
2071 /* Slot the shadow value in as the second to last component of the
2072 * coord.
2073 */
2074 ir->shadow_comparitor->accept(this);
2075 coord_dst.writemask = WRITEMASK_Z;
2076 emit(ir, OPCODE_MOV, coord_dst, this->result);
2077 coord_dst.writemask = WRITEMASK_XYZW;
2078 }
2079
2080 if (opcode == OPCODE_TXL || opcode == OPCODE_TXB) {
2081 /* Mesa IR stores lod or lod bias in the last channel of the coords. */
2082 coord_dst.writemask = WRITEMASK_W;
2083 emit(ir, OPCODE_MOV, coord_dst, lod_info);
2084 coord_dst.writemask = WRITEMASK_XYZW;
2085 }
2086
2087 inst = emit(ir, opcode, result_dst, coord);
2088
2089 if (ir->shadow_comparitor)
2090 inst->tex_shadow = GL_TRUE;
2091
2092 inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler,
2093 this->shader_program,
2094 this->prog);
2095
2096 const glsl_type *sampler_type = ir->sampler->type;
2097
2098 switch (sampler_type->sampler_dimensionality) {
2099 case GLSL_SAMPLER_DIM_1D:
2100 inst->tex_target = (sampler_type->sampler_array)
2101 ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
2102 break;
2103 case GLSL_SAMPLER_DIM_2D:
2104 inst->tex_target = (sampler_type->sampler_array)
2105 ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
2106 break;
2107 case GLSL_SAMPLER_DIM_3D:
2108 inst->tex_target = TEXTURE_3D_INDEX;
2109 break;
2110 case GLSL_SAMPLER_DIM_CUBE:
2111 inst->tex_target = TEXTURE_CUBE_INDEX;
2112 break;
2113 case GLSL_SAMPLER_DIM_RECT:
2114 inst->tex_target = TEXTURE_RECT_INDEX;
2115 break;
2116 case GLSL_SAMPLER_DIM_BUF:
2117 assert(!"FINISHME: Implement ARB_texture_buffer_object");
2118 break;
2119 default:
2120 assert(!"Should not get here.");
2121 }
2122
2123 this->result = result_src;
2124 }
2125
2126 void
2127 ir_to_mesa_visitor::visit(ir_return *ir)
2128 {
2129 if (ir->get_value()) {
2130 dst_reg l;
2131 int i;
2132
2133 assert(current_function);
2134
2135 ir->get_value()->accept(this);
2136 src_reg r = this->result;
2137
2138 l = dst_reg(current_function->return_reg);
2139
2140 for (i = 0; i < type_size(current_function->sig->return_type); i++) {
2141 emit(ir, OPCODE_MOV, l, r);
2142 l.index++;
2143 r.index++;
2144 }
2145 }
2146
2147 emit(ir, OPCODE_RET);
2148 }
2149
2150 void
2151 ir_to_mesa_visitor::visit(ir_discard *ir)
2152 {
2153 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;
2154
2155 if (ir->condition) {
2156 ir->condition->accept(this);
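/* KIL discards the fragment when any source component is negative,
 * so negate the boolean condition (1.0 when true) to kill exactly
 * when the condition holds.
 */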
2157 this->result.negate = ~this->result.negate;
2158 emit(ir, OPCODE_KIL, ir_to_mesa_undef_dst, this->result);
2159 } else {
2160 emit(ir, OPCODE_KIL_NV);
2161 }
2162
2163 fp->UsesKill = GL_TRUE;
2164 }
2165
2166 void
2167 ir_to_mesa_visitor::visit(ir_if *ir)
2168 {
2169 ir_to_mesa_instruction *cond_inst, *if_inst, *else_inst = NULL;
2170 ir_to_mesa_instruction *prev_inst;
2171
2172 prev_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
2173
2174 ir->condition->accept(this);
2175 assert(this->result.file != PROGRAM_UNDEFINED);
2176
2177 if (this->options->EmitCondCodes) {
2178 cond_inst = (ir_to_mesa_instruction *)this->instructions.get_tail();
2179
2180 /* See if we actually generated any instructions for the
2181 * condition.  If not, then cook up a move to a temp so we
2182 * have something to set cond_update on.
2183 */
2184 if (cond_inst == prev_inst) {
2185 src_reg temp = get_temp(glsl_type::bool_type);
2186 cond_inst = emit(ir->condition, OPCODE_MOV, dst_reg(temp), result);
2187 }
2188 cond_inst->cond_update = GL_TRUE;
2189
2190 if_inst = emit(ir->condition, OPCODE_IF);
2191 if_inst->dst.cond_mask = COND_NE;
2192 } else {
2193 if_inst = emit(ir->condition, OPCODE_IF, ir_to_mesa_undef_dst,
2194 this->result);
2195 }
2196
2197 this->instructions.push_tail(if_inst);
2198
2199 visit_exec_list(&ir->then_instructions, this);
2200
2201 if (!ir->else_instructions.is_empty()) {
2202 else_inst = emit(ir->condition, OPCODE_ELSE);
2203 visit_exec_list(&ir->else_instructions, this);
2204 }
2205
2206 if_inst = emit(ir->condition, OPCODE_ENDIF,
2207 ir_to_mesa_undef_dst, ir_to_mesa_undef);
2208 }
2209
2210 ir_to_mesa_visitor::ir_to_mesa_visitor()
2211 {
2212 result.file = PROGRAM_UNDEFINED;
2213 next_temp = 1;
2214 next_signature_id = 1;
2215 current_function = NULL;
2216 mem_ctx = ralloc_context(NULL);
2217 }
2218
2219 ir_to_mesa_visitor::~ir_to_mesa_visitor()
2220 {
2221 ralloc_free(mem_ctx);
2222 }
2223
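/**
 * Convert one of our wide src_reg structs into Mesa's packed
 * prog_src_register.  Only the fact that relative addressing is used
 * survives here, as the RelAddr flag.
 */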
2224 static struct prog_src_register
2225 mesa_src_reg_from_ir_src_reg(src_reg reg)
2226 {
2227 struct prog_src_register mesa_reg;
2228
2229 mesa_reg.File = reg.file;
2230 assert(reg.index < (1 << INST_INDEX_BITS));
2231 mesa_reg.Index = reg.index;
2232 mesa_reg.Swizzle = reg.swizzle;
2233 mesa_reg.RelAddr = reg.reladdr != NULL;
2234 mesa_reg.Negate = reg.negate;
2235 mesa_reg.Abs = 0;
2236 mesa_reg.HasIndex2 = GL_FALSE;
2237 mesa_reg.RelAddr2 = 0;
2238 mesa_reg.Index2 = 0;
2239
2240 return mesa_reg;
2241 }
2242
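/**
 * Resolve the BranchTarget fields of the flow-control instructions.
 *
 * The first pass just counts IF and BGNLOOP nesting so the stacks can
 * be sized; the second pass links IF/ELSE/ENDIF and BGNLOOP/ENDLOOP
 * pairs, points pending BRK/CONT instructions at their loop's ENDLOOP,
 * and rewrites CAL targets from signature ids to instruction indices.
 */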
2243 static void
2244 set_branchtargets(ir_to_mesa_visitor *v,
2245 struct prog_instruction *mesa_instructions,
2246 int num_instructions)
2247 {
2248 int if_count = 0, loop_count = 0;
2249 int *if_stack, *loop_stack;
2250 int if_stack_pos = 0, loop_stack_pos = 0;
2251 int i, j;
2252
2253 for (i = 0; i < num_instructions; i++) {
2254 switch (mesa_instructions[i].Opcode) {
2255 case OPCODE_IF:
2256 if_count++;
2257 break;
2258 case OPCODE_BGNLOOP:
2259 loop_count++;
2260 break;
2261 case OPCODE_BRK:
2262 case OPCODE_CONT:
2263 mesa_instructions[i].BranchTarget = -1;
2264 break;
2265 default:
2266 break;
2267 }
2268 }
2269
2270 if_stack = rzalloc_array(v->mem_ctx, int, if_count);
2271 loop_stack = rzalloc_array(v->mem_ctx, int, loop_count);
2272
2273 for (i = 0; i < num_instructions; i++) {
2274 switch (mesa_instructions[i].Opcode) {
2275 case OPCODE_IF:
2276 if_stack[if_stack_pos] = i;
2277 if_stack_pos++;
2278 break;
2279 case OPCODE_ELSE:
2280 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
2281 if_stack[if_stack_pos - 1] = i;
2282 break;
2283 case OPCODE_ENDIF:
2284 mesa_instructions[if_stack[if_stack_pos - 1]].BranchTarget = i;
2285 if_stack_pos--;
2286 break;
2287 case OPCODE_BGNLOOP:
2288 loop_stack[loop_stack_pos] = i;
2289 loop_stack_pos++;
2290 break;
2291 case OPCODE_ENDLOOP:
2292 loop_stack_pos--;
2293 /* Rewrite any breaks/conts at this nesting level that haven't
2294 * already been given a BranchTarget so that they point to the
2295 * end of the loop.
2296 */
2297 for (j = loop_stack[loop_stack_pos]; j < i; j++) {
2298 if (mesa_instructions[j].Opcode == OPCODE_BRK ||
2299 mesa_instructions[j].Opcode == OPCODE_CONT) {
2300 if (mesa_instructions[j].BranchTarget == -1) {
2301 mesa_instructions[j].BranchTarget = i;
2302 }
2303 }
2304 }
2305 /* The BGNLOOP and ENDLOOP of the loop point at each other. */
2306 mesa_instructions[i].BranchTarget = loop_stack[loop_stack_pos];
2307 mesa_instructions[loop_stack[loop_stack_pos]].BranchTarget = i;
2308 break;
2309 case OPCODE_CAL:
2310 foreach_iter(exec_list_iterator, iter, v->function_signatures) {
2311 function_entry *entry = (function_entry *)iter.get();
2312
2313 if (entry->sig_id == mesa_instructions[i].BranchTarget) {
2314 mesa_instructions[i].BranchTarget = entry->inst;
2315 break;
2316 }
2317 }
2318 break;
2319 default:
2320 break;
2321 }
2322 }
2323 }
2324
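/**
 * Dump the Mesa IR along with the GLSL IR that each run of
 * instructions was generated from, for GLSL_DUMP debugging output.
 */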
2325 static void
2326 print_program(struct prog_instruction *mesa_instructions,
2327 ir_instruction **mesa_instruction_annotation,
2328 int num_instructions)
2329 {
2330 ir_instruction *last_ir = NULL;
2331 int i;
2332 int indent = 0;
2333
2334 for (i = 0; i < num_instructions; i++) {
2335 struct prog_instruction *mesa_inst = mesa_instructions + i;
2336 ir_instruction *ir = mesa_instruction_annotation[i];
2337
2338 fprintf(stdout, "%3d: ", i);
2339
2340 if (last_ir != ir && ir) {
2341 int j;
2342
2343 for (j = 0; j < indent; j++) {
2344 fprintf(stdout, " ");
2345 }
2346 ir->print();
2347 printf("\n");
2348 last_ir = ir;
2349
2350 fprintf(stdout, " "); /* line number spacing. */
2351 }
2352
2353 indent = _mesa_fprint_instruction_opt(stdout, mesa_inst, indent,
2354 PROG_PRINT_DEBUG, NULL);
2355 }
2356 }
2357
2358
2359 /**
2360 * Count resources used by the given gpu program (number of texture
2361 * samplers, etc).
2362 */
2363 static void
2364 count_resources(struct gl_program *prog)
2365 {
2366 unsigned int i;
2367
2368 prog->SamplersUsed = 0;
2369
2370 for (i = 0; i < prog->NumInstructions; i++) {
2371 struct prog_instruction *inst = &prog->Instructions[i];
2372
2373 if (_mesa_is_tex_instruction(inst->Opcode)) {
2374 prog->SamplerTargets[inst->TexSrcUnit] =
2375 (gl_texture_index)inst->TexSrcTarget;
2376 prog->SamplersUsed |= 1 << inst->TexSrcUnit;
2377 if (inst->TexShadow) {
2378 prog->ShadowSamplers |= 1 << inst->TexSrcUnit;
2379 }
2380 }
2381 }
2382
2383 _mesa_update_shader_textures_used(prog);
2384 }
2385
2386
2387 /**
2388 * Check if the given vertex/geometry/fragment program is within the
2389 * resource limits of the context (number of texture units, etc).
2390 * If any of those checks fail, record a linker error.
2391 *
2392 * XXX more checks are needed...
2393 */
2394 static void
2395 check_resources(const struct gl_context *ctx,
2396 struct gl_shader_program *shader_program,
2397 struct gl_program *prog)
2398 {
2399 switch (prog->Target) {
2400 case GL_VERTEX_PROGRAM_ARB:
2401 if (_mesa_bitcount(prog->SamplersUsed) >
2402 ctx->Const.MaxVertexTextureImageUnits) {
2403 fail_link(shader_program, "Too many vertex shader texture samplers");
2404 }
2405 if (prog->Parameters->NumParameters > MAX_UNIFORMS) {
2406 fail_link(shader_program, "Too many vertex shader constants");
2407 }
2408 break;
2409 case MESA_GEOMETRY_PROGRAM:
2410 if (_mesa_bitcount(prog->SamplersUsed) >
2411 ctx->Const.MaxGeometryTextureImageUnits) {
2412 fail_link(shader_program, "Too many geometry shader texture samplers");
2413 }
2414 if (prog->Parameters->NumParameters >
2415 MAX_GEOMETRY_UNIFORM_COMPONENTS / 4) {
2416 fail_link(shader_program, "Too many geometry shader constants");
2417 }
2418 break;
2419 case GL_FRAGMENT_PROGRAM_ARB:
2420 if (_mesa_bitcount(prog->SamplersUsed) >
2421 ctx->Const.MaxTextureImageUnits) {
2422 fail_link(shader_program, "Too many fragment shader texture samplers");
2423 }
2424 if (prog->Parameters->NumParameters > MAX_UNIFORMS) {
2425 fail_link(shader_program, "Too many fragment shader constants");
2426 }
2427 break;
2428 default:
2429 _mesa_problem(ctx, "unexpected program type in check_resources()");
2430 }
2431 }
2432
2433
2434
2435 struct uniform_sort {
2436 struct gl_uniform *u;
2437 int pos;
2438 };
2439
2440 /* The shader_program->Uniforms list is almost sorted by increasing
2441 * uniform->{Frag,Vert}Pos location, but not quite when there are
2442 * uniforms shared between targets.  We need to add parameters to
2443 * this target's list in increasing location order.
2444 */
2445 static int
2446 sort_uniforms(const void *a, const void *b)
2447 {
2448 struct uniform_sort *u1 = (struct uniform_sort *)a;
2449 struct uniform_sort *u2 = (struct uniform_sort *)b;
2450
2451 return u1->pos - u2->pos;
2452 }
2453
2454 /* Add the uniforms to the parameters.  The linker chose locations
2455 * in our parameter lists (which hadn't been created yet at that
2456 * point), and the uniforms code will use those locations to poke
2457 * values into our parameter list when uniforms are updated.
2458 */
2459 static void
2460 add_uniforms_to_parameters_list(struct gl_shader_program *shader_program,
2461 struct gl_shader *shader,
2462 struct gl_program *prog)
2463 {
2464 unsigned int i;
2465 unsigned int next_sampler = 0, num_uniforms = 0;
2466 struct uniform_sort *sorted_uniforms;
2467
2468 sorted_uniforms = ralloc_array(NULL, struct uniform_sort,
2469 shader_program->Uniforms->NumUniforms);
2470
2471 for (i = 0; i < shader_program->Uniforms->NumUniforms; i++) {
2472 struct gl_uniform *uniform = shader_program->Uniforms->Uniforms + i;
2473 int parameter_index = -1;
2474
2475 switch (shader->Type) {
2476 case GL_VERTEX_SHADER:
2477 parameter_index = uniform->VertPos;
2478 break;
2479 case GL_FRAGMENT_SHADER:
2480 parameter_index = uniform->FragPos;
2481 break;
2482 case GL_GEOMETRY_SHADER:
2483 parameter_index = uniform->GeomPos;
2484 break;
2485 }
2486
2487 /* Only add uniforms used in our target. */
2488 if (parameter_index != -1) {
2489 sorted_uniforms[num_uniforms].pos = parameter_index;
2490 sorted_uniforms[num_uniforms].u = uniform;
2491 num_uniforms++;
2492 }
2493 }
2494
2495 qsort(sorted_uniforms, num_uniforms, sizeof(struct uniform_sort),
2496 sort_uniforms);
2497
2498 for (i = 0; i < num_uniforms; i++) {
2499 struct gl_uniform *uniform = sorted_uniforms[i].u;
2500 int parameter_index = sorted_uniforms[i].pos;
2501 const glsl_type *type = uniform->Type;
2502 unsigned int size;
2503
2504 if (type->is_vector() ||
2505 type->is_scalar()) {
2506 size = type->vector_elements;
2507 } else {
2508 size = type_size(type) * 4;
2509 }
2510
2511 gl_register_file file;
2512 if (type->is_sampler() ||
2513 (type->is_array() && type->fields.array->is_sampler())) {
2514 file = PROGRAM_SAMPLER;
2515 } else {
2516 file = PROGRAM_UNIFORM;
2517 }
2518
2519 GLint index = _mesa_lookup_parameter_index(prog->Parameters, -1,
2520 uniform->Name);
2521
2522 if (index < 0) {
2523 index = _mesa_add_parameter(prog->Parameters, file,
2524 uniform->Name, size, type->gl_type,
2525 NULL, NULL, 0x0);
2526
2527 /* Sampler uniform values are stored in prog->SamplerUnits,
2528 * and the entry in that array is selected by this index we
2529 * store in ParameterValues[].
2530 */
2531 if (file == PROGRAM_SAMPLER) {
2532 for (unsigned int j = 0; j < size / 4; j++)
2533 prog->Parameters->ParameterValues[index + j][0] = next_sampler++;
2534 }
2535
2536 /* The location chosen in the Parameters list here (returned
2537 * from _mesa_add_parameter) has to match what the linker chose.
2538 */
2539 if (index != parameter_index) {
2540 fail_link(shader_program, "Allocation of uniform `%s' to target "
2541 "failed (%d vs %d)\n",
2542 uniform->Name, index, parameter_index);
2543 }
2544 }
2545 }
2546
2547 ralloc_free(sorted_uniforms);
2548 }
2549
2550 static void
2551 set_uniform_initializer(struct gl_context *ctx, void *mem_ctx,
2552 struct gl_shader_program *shader_program,
2553 const char *name, const glsl_type *type,
2554 ir_constant *val)
2555 {
2556 if (type->is_record()) {
2557 ir_constant *field_constant;
2558
2559 field_constant = (ir_constant *)val->components.get_head();
2560
2561 for (unsigned int i = 0; i < type->length; i++) {
2562 const glsl_type *field_type = type->fields.structure[i].type;
2563 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
2564 type->fields.structure[i].name);
2565 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name,
2566 field_type, field_constant);
2567 field_constant = (ir_constant *)field_constant->next;
2568 }
2569 return;
2570 }
2571
2572 int loc = _mesa_get_uniform_location(ctx, shader_program, name);
2573
2574 if (loc == -1) {
2575 fail_link(shader_program,
2576 "Couldn't find uniform for initializer %s\n", name);
2577 return;
2578 }
2579
2580 for (unsigned int i = 0; i < (type->is_array() ? type->length : 1); i++) {
2581 ir_constant *element;
2582 const glsl_type *element_type;
2583 if (type->is_array()) {
2584 element = val->array_elements[i];
2585 element_type = type->fields.array;
2586 } else {
2587 element = val;
2588 element_type = type;
2589 }
2590
2591 void *values;
2592
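/* Boolean constants have no glUniform* entry point of their own, so
 * convert them to integers and upload them with a same-sized int type.
 */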
2593 if (element_type->base_type == GLSL_TYPE_BOOL) {
2594 int *conv = ralloc_array(mem_ctx, int, element_type->components());
2595 for (unsigned int j = 0; j < element_type->components(); j++) {
2596 conv[j] = element->value.b[j];
2597 }
2598 values = (void *)conv;
2599 element_type = glsl_type::get_instance(GLSL_TYPE_INT,
2600 element_type->vector_elements,
2601 1);
2602 } else {
2603 values = &element->value;
2604 }
2605
2606 if (element_type->is_matrix()) {
2607 _mesa_uniform_matrix(ctx, shader_program,
2608 element_type->matrix_columns,
2609 element_type->vector_elements,
2610 loc, 1, GL_FALSE, (GLfloat *)values);
2611 loc += element_type->matrix_columns;
2612 } else {
2613 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns,
2614 values, element_type->gl_type);
2615 loc += type_size(element_type);
2616 }
2617 }
2618 }
2619
2620 static void
2621 set_uniform_initializers(struct gl_context *ctx,
2622 struct gl_shader_program *shader_program)
2623 {
2624 void *mem_ctx = NULL;
2625
2626 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2627 struct gl_shader *shader = shader_program->_LinkedShaders[i];
2628
2629 if (shader == NULL)
2630 continue;
2631
2632 foreach_iter(exec_list_iterator, iter, *shader->ir) {
2633 ir_instruction *ir = (ir_instruction *)iter.get();
2634 ir_variable *var = ir->as_variable();
2635
2636 if (!var || var->mode != ir_var_uniform || !var->constant_value)
2637 continue;
2638
2639 if (!mem_ctx)
2640 mem_ctx = ralloc_context(NULL);
2641
2642 set_uniform_initializer(ctx, mem_ctx, shader_program, var->name,
2643 var->type, var->constant_value);
2644 }
2645 }
2646
2647 ralloc_free(mem_ctx);
2648 }
2649
2650 /*
2651 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
2652 * channels for copy propagation and updates following instructions to
2653 * use the original versions.
2654 *
2655 * The ir_to_mesa_visitor lazily produces code assuming that this pass
2656 * will occur. As an example, a TXP production before this pass:
2657 *
2658 * 0: MOV TEMP[1], INPUT[4].xyyy;
2659 * 1: MOV TEMP[1].w, INPUT[4].wwww;
2660 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
2661 *
2662 * and after:
2663 *
2664 * 0: MOV TEMP[1], INPUT[4].xyyy;
2665 * 1: MOV TEMP[1].w, INPUT[4].wwww;
2666 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
2667 *
2668 * which allows for dead code elimination on TEMP[1]'s writes.
2669 */
2670 void
2671 ir_to_mesa_visitor::copy_propagate(void)
2672 {
2673 ir_to_mesa_instruction **acp = rzalloc_array(mem_ctx,
2674 ir_to_mesa_instruction *,
2675 this->next_temp * 4);
2676 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
2677 int level = 0;
2678
2679 foreach_iter(exec_list_iterator, iter, this->instructions) {
2680 ir_to_mesa_instruction *inst = (ir_to_mesa_instruction *)iter.get();
2681
2682 assert(inst->dst.file != PROGRAM_TEMPORARY
2683 || inst->dst.index < this->next_temp);
2684
2685 /* First, do any copy propagation possible into the src regs. */
2686 for (int r = 0; r < 3; r++) {
2687 ir_to_mesa_instruction *first = NULL;
2688 bool good = true;
2689 int acp_base = inst->src[r].index * 4;
2690
2691 if (inst->src[r].file != PROGRAM_TEMPORARY ||
2692 inst->src[r].reladdr)
2693 continue;
2694
2695 /* See if we can find entries in the ACP consisting of MOVs
2696 * from the same src register for all the swizzled channels
2697 * of this src register reference.
2698 */
2699 for (int i = 0; i < 4; i++) {
2700 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
2701 ir_to_mesa_instruction *copy_chan = acp[acp_base + src_chan];
2702
2703 if (!copy_chan) {
2704 good = false;
2705 break;
2706 }
2707
2708 assert(acp_level[acp_base + src_chan] <= level);
2709
2710 if (!first) {
2711 first = copy_chan;
2712 } else {
2713 if (first->src[0].file != copy_chan->src[0].file ||
2714 first->src[0].index != copy_chan->src[0].index) {
2715 good = false;
2716 break;
2717 }
2718 }
2719 }
2720
2721 if (good) {
2722 /* We've now validated that we can copy-propagate to
2723 * replace this src register reference. Do it.
2724 */
2725 inst->src[r].file = first->src[0].file;
2726 inst->src[r].index = first->src[0].index;
2727
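/* Compose the swizzles: each channel we read from the temporary is
 * replaced by the channel the recorded MOV read from its source.
 * In the TXP example above, reading TEMP[1].xyzw becomes a read of
 * INPUT[4].xyyw.
 */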
2728 int swizzle = 0;
2729 for (int i = 0; i < 4; i++) {
2730 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
2731 ir_to_mesa_instruction *copy_inst = acp[acp_base + src_chan];
2732 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) <<
2733 (3 * i));
2734 }
2735 inst->src[r].swizzle = swizzle;
2736 }
2737 }
2738
2739 switch (inst->op) {
2740 case OPCODE_BGNLOOP:
2741 case OPCODE_ENDLOOP:
2742 /* End of a basic block, clear the ACP entirely. */
2743 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
2744 break;
2745
2746 case OPCODE_IF:
2747 ++level;
2748 break;
2749
2750 case OPCODE_ENDIF:
2751 case OPCODE_ELSE:
2752 /* Clear all channels written inside the block from the ACP,
2753 * leaving those that were not touched.
2754 */
2755 for (int r = 0; r < this->next_temp; r++) {
2756 for (int c = 0; c < 4; c++) {
2757 if (!acp[4 * r + c])
2758 continue;
2759
2760 if (acp_level[4 * r + c] >= level)
2761 acp[4 * r + c] = NULL;
2762 }
2763 }
2764 if (inst->op == OPCODE_ENDIF)
2765 --level;
2766 break;
2767
2768 default:
2769 /* Continuing the block, clear any written channels from
2770 * the ACP.
2771 */
2772 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) {
2773 /* Any temporary might be written, so no copy propagation
2774 * across this instruction.
2775 */
2776 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
2777 } else if (inst->dst.file == PROGRAM_OUTPUT &&
2778 inst->dst.reladdr) {
2779 /* Any output might be written, so no copy propagation
2780 * from outputs across this instruction.
2781 */
2782 for (int r = 0; r < this->next_temp; r++) {
2783 for (int c = 0; c < 4; c++) {
2784 if (!acp[4 * r + c])
2785 continue;
2786
2787 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
2788 acp[4 * r + c] = NULL;
2789 }
2790 }
2791 } else if (inst->dst.file == PROGRAM_TEMPORARY ||
2792 inst->dst.file == PROGRAM_OUTPUT) {
2793 /* Clear where it's used as dst. */
2794 if (inst->dst.file == PROGRAM_TEMPORARY) {
2795 for (int c = 0; c < 4; c++) {
2796 if (inst->dst.writemask & (1 << c)) {
2797 acp[4 * inst->dst.index + c] = NULL;
2798 }
2799 }
2800 }
2801
2802 /* Clear where it's used as src. */
2803 for (int r = 0; r < this->next_temp; r++) {
2804 for (int c = 0; c < 4; c++) {
2805 if (!acp[4 * r + c])
2806 continue;
2807
2808 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
2809
2810 if (acp[4 * r + c]->src[0].file == inst->dst.file &&
2811 acp[4 * r + c]->src[0].index == inst->dst.index &&
2812 inst->dst.writemask & (1 << src_chan))
2813 {
2814 acp[4 * r + c] = NULL;
2815 }
2816 }
2817 }
2818 }
2819 break;
2820 }
2821
2822 /* If this is a copy, add it to the ACP. */
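/* Only plain MOVs are safe to record: saturate or negate would
 * change the value being propagated, and relative addressing on
 * either side makes the registers involved unknowable.
 */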
2823 if (inst->op == OPCODE_MOV &&
2824 inst->dst.file == PROGRAM_TEMPORARY &&
2825 !inst->dst.reladdr &&
2826 !inst->saturate &&
2827 !inst->src[0].reladdr &&
2828 !inst->src[0].negate) {
2829 for (int i = 0; i < 4; i++) {
2830 if (inst->dst.writemask & (1 << i)) {
2831 acp[4 * inst->dst.index + i] = inst;
2832 acp_level[4 * inst->dst.index + i] = level;
2833 }
2834 }
2835 }
2836 }
2837
2838 ralloc_free(acp_level);
2839 ralloc_free(acp);
2840 }
2841
2842
2843 /**
2844 * Convert a shader's GLSL IR into a Mesa gl_program.
2845 */
2846 static struct gl_program *
2847 get_mesa_program(struct gl_context *ctx,
2848 struct gl_shader_program *shader_program,
2849 struct gl_shader *shader)
2850 {
2851 ir_to_mesa_visitor v;
2852 struct prog_instruction *mesa_instructions, *mesa_inst;
2853 ir_instruction **mesa_instruction_annotation;
2854 int i;
2855 struct gl_program *prog;
2856 GLenum target;
2857 const char *target_string;
2858 GLboolean progress;
2859 struct gl_shader_compiler_options *options =
2860 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)];
2861
2862 switch (shader->Type) {
2863 case GL_VERTEX_SHADER:
2864 target = GL_VERTEX_PROGRAM_ARB;
2865 target_string = "vertex";
2866 break;
2867 case GL_FRAGMENT_SHADER:
2868 target = GL_FRAGMENT_PROGRAM_ARB;
2869 target_string = "fragment";
2870 break;
2871 case GL_GEOMETRY_SHADER:
2872 target = GL_GEOMETRY_PROGRAM_NV;
2873 target_string = "geometry";
2874 break;
2875 default:
2876 assert(!"should not be reached");
2877 return NULL;
2878 }
2879
2880 validate_ir_tree(shader->ir);
2881
2882 prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name);
2883 if (!prog)
2884 return NULL;
2885 prog->Parameters = _mesa_new_parameter_list();
2886 prog->Varying = _mesa_new_parameter_list();
2887 prog->Attributes = _mesa_new_parameter_list();
2888 v.ctx = ctx;
2889 v.prog = prog;
2890 v.shader_program = shader_program;
2891 v.options = options;
2892
2893 add_uniforms_to_parameters_list(shader_program, shader, prog);
2894
2895 /* Emit Mesa IR for main(). */
2896 visit_exec_list(shader->ir, &v);
2897 v.emit(NULL, OPCODE_END);
2898
2899 /* Now emit bodies for any functions that were used. */
2900 do {
2901 progress = GL_FALSE;
2902
2903 foreach_iter(exec_list_iterator, iter, v.function_signatures) {
2904 function_entry *entry = (function_entry *)iter.get();
2905
2906 if (!entry->bgn_inst) {
2907 v.current_function = entry;
2908
2909 entry->bgn_inst = v.emit(NULL, OPCODE_BGNSUB);
2910 entry->bgn_inst->function = entry;
2911
2912 visit_exec_list(&entry->sig->body, &v);
2913
2914 ir_to_mesa_instruction *last;
2915 last = (ir_to_mesa_instruction *)v.instructions.get_tail();
2916 if (last->op != OPCODE_RET)
2917 v.emit(NULL, OPCODE_RET);
2918
2919 ir_to_mesa_instruction *end;
2920 end = v.emit(NULL, OPCODE_ENDSUB);
2921 end->function = entry;
2922
2923 progress = GL_TRUE;
2924 }
2925 }
2926 } while (progress);
2927
2928 prog->NumTemporaries = v.next_temp;
2929
2930 int num_instructions = 0;
2931 foreach_iter(exec_list_iterator, iter, v.instructions) {
2932 num_instructions++;
2933 }
2934
2935 mesa_instructions =
2936 (struct prog_instruction *)calloc(num_instructions,
2937 sizeof(*mesa_instructions));
2938 mesa_instruction_annotation = ralloc_array(v.mem_ctx, ir_instruction *,
2939 num_instructions);
2940
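/* Copy propagation runs on the ir_to_mesa_instruction list, before it
 * is flattened into prog_instructions below.
 */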
2941 v.copy_propagate();
2942
2943 /* Convert ir_to_mesa_instructions into prog_instructions.
2944 */
2945 mesa_inst = mesa_instructions;
2946 i = 0;
2947 foreach_iter(exec_list_iterator, iter, v.instructions) {
2948 const ir_to_mesa_instruction *inst = (ir_to_mesa_instruction *)iter.get();
2949
2950 mesa_inst->Opcode = inst->op;
2951 mesa_inst->CondUpdate = inst->cond_update;
2952 if (inst->saturate)
2953 mesa_inst->SaturateMode = SATURATE_ZERO_ONE;
2954 mesa_inst->DstReg.File = inst->dst.file;
2955 mesa_inst->DstReg.Index = inst->dst.index;
2956 mesa_inst->DstReg.CondMask = inst->dst.cond_mask;
2957 mesa_inst->DstReg.WriteMask = inst->dst.writemask;
2958 mesa_inst->DstReg.RelAddr = inst->dst.reladdr != NULL;
2959 mesa_inst->SrcReg[0] = mesa_src_reg_from_ir_src_reg(inst->src[0]);
2960 mesa_inst->SrcReg[1] = mesa_src_reg_from_ir_src_reg(inst->src[1]);
2961 mesa_inst->SrcReg[2] = mesa_src_reg_from_ir_src_reg(inst->src[2]);
2962 mesa_inst->TexSrcUnit = inst->sampler;
2963 mesa_inst->TexSrcTarget = inst->tex_target;
2964 mesa_inst->TexShadow = inst->tex_shadow;
2965 mesa_instruction_annotation[i] = inst->ir;
2966
2967 /* Set IndirectRegisterFiles. */
2968 if (mesa_inst->DstReg.RelAddr)
2969 prog->IndirectRegisterFiles |= 1 << mesa_inst->DstReg.File;
2970
2971 /* Update program's bitmask of indirectly accessed register files */
2972 for (unsigned src = 0; src < 3; src++)
2973 if (mesa_inst->SrcReg[src].RelAddr)
2974 prog->IndirectRegisterFiles |= 1 << mesa_inst->SrcReg[src].File;
2975
2976 if (options->EmitNoIfs && mesa_inst->Opcode == OPCODE_IF) {
2977 fail_link(shader_program, "Couldn't flatten if statement\n");
2978 }
2979
2980 switch (mesa_inst->Opcode) {
2981 case OPCODE_BGNSUB:
2982 inst->function->inst = i;
2983 mesa_inst->Comment = strdup(inst->function->sig->function_name());
2984 break;
2985 case OPCODE_ENDSUB:
2986 mesa_inst->Comment = strdup(inst->function->sig->function_name());
2987 break;
2988 case OPCODE_CAL:
2989 mesa_inst->BranchTarget = inst->function->sig_id; /* rewritten later */
2990 break;
2991 case OPCODE_ARL:
2992 prog->NumAddressRegs = 1;
2993 break;
2994 default:
2995 break;
2996 }
2997
2998 mesa_inst++;
2999 i++;
3000
3001 if (!shader_program->LinkStatus)
3002 break;
3003 }
3004
3005 if (!shader_program->LinkStatus) {
3006 free(mesa_instructions);
3007 _mesa_reference_program(ctx, &shader->Program, NULL);
3008 return NULL;
3009 }
3010
3011 set_branchtargets(&v, mesa_instructions, num_instructions);
3012
3013 if (ctx->Shader.Flags & GLSL_DUMP) {
3014 printf("\n");
3015 printf("GLSL IR for linked %s program %d:\n", target_string,
3016 shader_program->Name);
3017 _mesa_print_ir(shader->ir, NULL);
3018 printf("\n");
3019 printf("\n");
3020 printf("Mesa IR for linked %s program %d:\n", target_string,
3021 shader_program->Name);
3022 print_program(mesa_instructions, mesa_instruction_annotation,
3023 num_instructions);
3024 }
3025
3026 prog->Instructions = mesa_instructions;
3027 prog->NumInstructions = num_instructions;
3028
3029 do_set_program_inouts(shader->ir, prog);
3030 count_resources(prog);
3031
3032 check_resources(ctx, shader_program, prog);
3033
3034 _mesa_reference_program(ctx, &shader->Program, prog);
3035
3036 if ((ctx->Shader.Flags & GLSL_NO_OPT) == 0) {
3037 _mesa_optimize_program(ctx, prog);
3038 }
3039
3040 return prog;
3041 }
3042
3043 extern "C" {
3044
3045 /**
3046 * Link a shader.
3047 * Called via ctx->Driver.LinkShader()
3048 * This actually involves converting GLSL IR into Mesa gl_programs with
3049 * code lowering and other optimizations.
3050 */
3051 GLboolean
3052 _mesa_ir_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
3053 {
3054 assert(prog->LinkStatus);
3055
3056 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
3057 if (prog->_LinkedShaders[i] == NULL)
3058 continue;
3059
3060 bool progress;
3061 exec_list *ir = prog->_LinkedShaders[i]->ir;
3062 const struct gl_shader_compiler_options *options =
3063 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)];
3064
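/* Iterate the lowering and optimization passes to a fixed point;
 * each pass can expose new opportunities for the others.
 */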
3065 do {
3066 progress = false;
3067
3068 /* Lowering */
3069 do_mat_op_to_vec(ir);
3070 lower_instructions(ir, (MOD_TO_FRACT | DIV_TO_MUL_RCP | EXP_TO_EXP2
3071 | LOG_TO_LOG2
3072 | ((options->EmitNoPow) ? POW_TO_EXP2 : 0)));
3073
3074 progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress;
3075
3076 progress = do_common_optimization(ir, true, options->MaxUnrollIterations) || progress;
3077
3078 progress = lower_quadop_vector(ir, true) || progress;
3079
3080 if (options->EmitNoIfs) {
3081 progress = lower_discard(ir) || progress;
3082 progress = lower_if_to_cond_assign(ir) || progress;
3083 }
3084
3085 if (options->EmitNoNoise)
3086 progress = lower_noise(ir) || progress;
3087
3088 /* If there are forms of indirect addressing that the driver
3089 * cannot handle, perform the lowering pass.
3090 */
3091 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput
3092 || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform)
3093 progress =
3094 lower_variable_index_to_cond_assign(ir,
3095 options->EmitNoIndirectInput,
3096 options->EmitNoIndirectOutput,
3097 options->EmitNoIndirectTemp,
3098 options->EmitNoIndirectUniform)
3099 || progress;
3100
3101 progress = do_vec_index_to_cond_assign(ir) || progress;
3102 } while (progress);
3103
3104 validate_ir_tree(ir);
3105 }
3106
3107 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
3108 struct gl_program *linked_prog;
3109
3110 if (prog->_LinkedShaders[i] == NULL)
3111 continue;
3112
3113 linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i]);
3114
3115 if (linked_prog) {
3116 bool ok = true;
3117
3118 switch (prog->_LinkedShaders[i]->Type) {
3119 case GL_VERTEX_SHADER:
3120 _mesa_reference_vertprog(ctx, &prog->VertexProgram,
3121 (struct gl_vertex_program *)linked_prog);
3122 ok = ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB,
3123 linked_prog);
3124 break;
3125 case GL_FRAGMENT_SHADER:
3126 _mesa_reference_fragprog(ctx, &prog->FragmentProgram,
3127 (struct gl_fragment_program *)linked_prog);
3128 ok = ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB,
3129 linked_prog);
3130 break;
3131 case GL_GEOMETRY_SHADER:
3132 _mesa_reference_geomprog(ctx, &prog->GeometryProgram,
3133 (struct gl_geometry_program *)linked_prog);
3134 ok = ctx->Driver.ProgramStringNotify(ctx, GL_GEOMETRY_PROGRAM_NV,
3135 linked_prog);
3136 break;
3137 }
3138 if (!ok) {
3139 return GL_FALSE;
3140 }
3141 }
3142
3143 _mesa_reference_program(ctx, &linked_prog, NULL);
3144 }
3145
3146 return GL_TRUE;
3147 }
3148
3149
3150 /**
3151 * Compile a GLSL shader. Called via glCompileShader().
3152 */
3153 void
3154 _mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
3155 {
3156 struct _mesa_glsl_parse_state *state =
3157 new(shader) _mesa_glsl_parse_state(ctx, shader->Type, shader);
3158
3159 const char *source = shader->Source;
3160 /* Check if the user called glCompileShader without first calling
3161 * glShaderSource. This should fail to compile, but not raise a GL_ERROR.
3162 */
3163 if (source == NULL) {
3164 shader->CompileStatus = GL_FALSE;
3165 return;
3166 }
3167
3168 state->error = preprocess(state, &source, &state->info_log,
3169 &ctx->Extensions, ctx->API);
3170
3171 if (ctx->Shader.Flags & GLSL_DUMP) {
3172 printf("GLSL source for shader %d:\n", shader->Name);
3173 printf("%s\n", shader->Source);
3174 }
3175
3176 if (!state->error) {
3177 _mesa_glsl_lexer_ctor(state, source);
3178 _mesa_glsl_parse(state);
3179 _mesa_glsl_lexer_dtor(state);
3180 }
3181
3182 ralloc_free(shader->ir);
3183 shader->ir = new(shader) exec_list;
3184 if (!state->error && !state->translation_unit.is_empty())
3185 _mesa_ast_to_hir(shader->ir, state);
3186
3187 if (!state->error && !shader->ir->is_empty()) {
3188 validate_ir_tree(shader->ir);
3189
3190 /* Do some optimization at compile time to reduce shader IR size
3191 * and reduce later work if the same shader is linked multiple times
3192 */
3193 while (do_common_optimization(shader->ir, false, 32))
3194 ;
3195
3196 validate_ir_tree(shader->ir);
3197 }
3198
3199 shader->symbols = state->symbols;
3200
3201 shader->CompileStatus = !state->error;
3202 shader->InfoLog = state->info_log;
3203 shader->Version = state->language_version;
3204 memcpy(shader->builtins_to_link, state->builtins_to_link,
3205 sizeof(shader->builtins_to_link[0]) * state->num_builtins_to_link);
3206 shader->num_builtins_to_link = state->num_builtins_to_link;
3207
3208 if (ctx->Shader.Flags & GLSL_LOG) {
3209 _mesa_write_shader_to_file(shader);
3210 }
3211
3212 if (ctx->Shader.Flags & GLSL_DUMP) {
3213 if (shader->CompileStatus) {
3214 printf("GLSL IR for shader %d:\n", shader->Name);
3215 _mesa_print_ir(shader->ir, NULL);
3216 printf("\n\n");
3217 } else {
3218 printf("GLSL shader %d failed to compile.\n", shader->Name);
3219 }
3220 if (shader->InfoLog && shader->InfoLog[0] != 0) {
3221 printf("GLSL shader %d info log:\n", shader->Name);
3222 printf("%s\n", shader->InfoLog);
3223 }
3224 }
3225
3226 /* Retain any live IR, but trash the rest. */
3227 reparent_ir(shader->ir, shader->ir);
3228
3229 ralloc_free(state);
3230 }
3231
3232
3233 /**
3234 * Link a GLSL shader program. Called via glLinkProgram().
3235 */
3236 void
3237 _mesa_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
3238 {
3239 unsigned int i;
3240
3241 _mesa_clear_shader_program_data(ctx, prog);
3242
3243 prog->LinkStatus = GL_TRUE;
3244
3245 for (i = 0; i < prog->NumShaders; i++) {
3246 if (!prog->Shaders[i]->CompileStatus) {
3247 fail_link(prog, "linking with uncompiled shader");
3248 prog->LinkStatus = GL_FALSE;
3249 }
3250 }
3251
3252 prog->Varying = _mesa_new_parameter_list();
3253 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL);
3254 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL);
3255 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, NULL);
3256
3257 if (prog->LinkStatus) {
3258 link_shaders(ctx, prog);
3259 }
3260
3261 if (prog->LinkStatus) {
3262 if (!ctx->Driver.LinkShader(ctx, prog)) {
3263 prog->LinkStatus = GL_FALSE;
3264 }
3265 }
3266
3267 set_uniform_initializers(ctx, prog);
3268
3269 if (ctx->Shader.Flags & GLSL_DUMP) {
3270 if (!prog->LinkStatus) {
3271 printf("GLSL shader program %d failed to link\n", prog->Name);
3272 }
3273
3274 if (prog->InfoLog && prog->InfoLog[0] != 0) {
3275 printf("GLSL shader program %d info log:\n", prog->Name);
3276 printf("%s\n", prog->InfoLog);
3277 }
3278 }
3279 }
3280
3281 } /* extern "C" */