/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"

#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"

enum register_file {
   ARF = BRW_ARCHITECTURE_REGISTER_FILE,
   GRF = BRW_GENERAL_REGISTER_FILE,
   MRF = BRW_MESSAGE_REGISTER_FILE,
   IMM = BRW_IMMEDIATE_VALUE,
   FIXED_HW_REG, /* a struct brw_reg */
   UNIFORM, /* prog_data->params[hw_reg] */
   BAD_FILE
};

enum fs_opcodes {
   FS_OPCODE_FB_WRITE = 256,
   FS_OPCODE_RCP,
   FS_OPCODE_RSQ,
   FS_OPCODE_SQRT,
   FS_OPCODE_EXP2,
   FS_OPCODE_LOG2,
   FS_OPCODE_POW,
   FS_OPCODE_SIN,
   FS_OPCODE_COS,
   FS_OPCODE_DDX,
   FS_OPCODE_DDY,
   FS_OPCODE_LINTERP,
   FS_OPCODE_TEX,
   FS_OPCODE_TXB,
   FS_OPCODE_TXL,
   FS_OPCODE_DISCARD
};
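
/* The FS-specific opcodes start at 256 so they can share fs_inst's int
 * opcode field with the hardware's BRW_OPCODE_* values without colliding;
 * generate_code() relies on this when it falls back to brw_opcodes[] names
 * for unsupported opcodes below 256.
 */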

static int using_new_fs = -1;

static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);

struct gl_shader *
brw_new_shader(GLcontext *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = talloc_zero(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

struct gl_shader_program *
brw_new_shader_program(GLcontext *ctx, GLuint name)
{
   struct brw_shader_program *prog;

   prog = talloc_zero(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }

   return &prog->base;
}

GLboolean
brw_compile_shader(GLcontext *ctx, struct gl_shader *shader)
{
   if (!_mesa_ir_compile_shader(ctx, shader))
      return GL_FALSE;

   return GL_TRUE;
}

GLboolean
brw_link_shader(GLcontext *ctx, struct gl_shader_program *prog)
{
   if (using_new_fs == -1)
      using_new_fs = getenv("INTEL_NEW_FS") != NULL;

   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
      struct brw_shader *shader = (struct brw_shader *)prog->_LinkedShaders[i];

      if (using_new_fs && shader->base.Type == GL_FRAGMENT_SHADER) {
         void *mem_ctx = talloc_new(NULL);
         bool progress;

         talloc_free(shader->ir);
         shader->ir = new(shader) exec_list;
         clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

         do_mat_op_to_vec(shader->ir);
         do_mod_to_fract(shader->ir);
         do_div_to_mul_rcp(shader->ir);
         do_sub_to_add_neg(shader->ir);
         do_explog_to_explog2(shader->ir);

         do {
            progress = false;

            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);

            progress = do_lower_jumps(shader->ir, true, true,
                                      true, /* main return */
                                      false, /* continue */
                                      false /* loops */
                                      ) || progress;

            progress = do_common_optimization(shader->ir, true, 32) || progress;

            progress = lower_noise(shader->ir) || progress;
            lower_variable_index_to_cond_assign(shader->ir,
                                                GL_TRUE, /* input */
                                                GL_TRUE, /* output */
                                                GL_TRUE, /* temp */
                                                GL_TRUE /* uniform */);
         } while (progress);

         validate_ir_tree(shader->ir);

         reparent_ir(shader->ir, shader->ir);
         talloc_free(mem_ctx);
      }
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}
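
/* A sketch of what the lowering pipeline above does, using
 * "gl_FragColor = m * v;" (m a mat4 uniform, v a vec4 varying) as an
 * example: do_mat_op_to_vec() turns the matrix multiply into per-column
 * vec4 multiplies and adds, brw_do_channel_expressions() then splits each
 * of those into scalar per-component expressions, and
 * brw_do_vector_splitting() breaks the vec4 temporaries into independent
 * scalar variables.  The result is pure scalar code, which is what the
 * 8-wide (one channel per instruction) fs_visitor below expects to consume.
 */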

static int
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}

class fs_reg {
public:
   /* Callers of this talloc-based new need not call delete. It's
    * easier to just talloc_free 'ctx' (or any of its ancestors). */
   static void* operator new(size_t size, void *ctx)
   {
      void *node;

      node = talloc_size(ctx, size);
      assert(node != NULL);

      return node;
   }

   void init()
   {
      this->reg = 0;
      this->reg_offset = 0;
      this->hw_reg = -1;
      this->negate = 0;
      this->abs = 0;
   }

   /** Generic unset register constructor. */
   fs_reg()
   {
      init();
      this->file = BAD_FILE;
   }

   /** Immediate value constructor. */
   fs_reg(float f)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_F;
      this->imm.f = f;
   }

   /** Immediate value constructor. */
   fs_reg(int32_t i)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_D;
      this->imm.i = i;
   }

   /** Immediate value constructor. */
   fs_reg(uint32_t u)
   {
      init();
      this->file = IMM;
      this->type = BRW_REGISTER_TYPE_UD;
      this->imm.u = u;
   }

   /** Fixed brw_reg Immediate value constructor. */
   fs_reg(struct brw_reg fixed_hw_reg)
   {
      init();
      this->file = FIXED_HW_REG;
      this->fixed_hw_reg = fixed_hw_reg;
      this->type = fixed_hw_reg.type;
   }

   fs_reg(enum register_file file, int hw_reg);
   fs_reg(class fs_visitor *v, const struct glsl_type *type);

   /** Register file: ARF, GRF, MRF, IMM. */
   enum register_file file;
   /** virtual register number.  0 = fixed hw reg */
   int reg;
   /** Offset within the virtual register. */
   int reg_offset;
   /** HW register number.  Generally unset until register allocation. */
   int hw_reg;
   /** Register type.  BRW_REGISTER_TYPE_* */
   int type;
   bool negate;
   bool abs;
   struct brw_reg fixed_hw_reg;

   /** Value for file == IMM */
   union {
      int32_t i;
      uint32_t u;
      float f;
   } imm;
};

static const fs_reg reg_undef;
static const fs_reg reg_null(ARF, BRW_ARF_NULL);

class fs_inst : public exec_node {
public:
   /* Callers of this talloc-based new need not call delete. It's
    * easier to just talloc_free 'ctx' (or any of its ancestors). */
   static void* operator new(size_t size, void *ctx)
   {
      void *node;

      node = talloc_zero_size(ctx, size);
      assert(node != NULL);

      return node;
   }

   void init()
   {
      this->opcode = BRW_OPCODE_NOP;
      this->saturate = false;
      this->conditional_mod = BRW_CONDITIONAL_NONE;
      this->predicated = false;
      this->shadow_compare = false;
   }

   fs_inst()
   {
      init();
   }

   fs_inst(int opcode)
   {
      init();
      this->opcode = opcode;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
      this->src[1] = src1;
   }

   fs_inst(int opcode, fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2)
   {
      init();
      this->opcode = opcode;
      this->dst = dst;
      this->src[0] = src0;
      this->src[1] = src1;
      this->src[2] = src2;
   }

   int opcode; /* BRW_OPCODE_* or FS_OPCODE_* */
   fs_reg dst;
   fs_reg src[3];
   bool saturate;
   bool predicated;
   int conditional_mod; /**< BRW_CONDITIONAL_* */

   int mlen; /**< SEND message length */
   int sampler;
   int target; /**< MRT target. */
   bool eot;
   bool shadow_compare;

   /** @{
    * Annotation for the generated IR.  One of the two can be set.
    */
   ir_instruction *ir;
   const char *annotation;
   /** @} */
};
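
/* Note the emit(fs_inst(...)) idiom used throughout the visitor below: the
 * fs_inst is built as a temporary on the stack, and fs_visitor::emit()
 * copies it into a talloc-allocated node before appending it to the
 * instruction list, so the temporary never outlives the statement.
 */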

class fs_visitor : public ir_visitor
{
public:
   fs_visitor(struct brw_wm_compile *c, struct brw_shader *shader)
   {
      this->c = c;
      this->p = &c->func;
      this->brw = p->brw;
      this->fp = brw->fragment_program;
      this->intel = &brw->intel;
      this->ctx = &intel->ctx;
      this->mem_ctx = talloc_new(NULL);
      this->shader = shader;
      this->fail = false;
      this->variable_ht = hash_table_ctor(0,
                                          hash_table_pointer_hash,
                                          hash_table_pointer_compare);

      this->frag_color = NULL;
      this->frag_data = NULL;
      this->frag_depth = NULL;
      this->first_non_payload_grf = 0;

      this->current_annotation = NULL;
      this->annotation_string = NULL;
      this->annotation_ir = NULL;
      this->base_ir = NULL;

      this->virtual_grf_sizes = NULL;
      this->virtual_grf_next = 1;
      this->virtual_grf_array_size = 0;
   }

   ~fs_visitor()
   {
      talloc_free(this->mem_ctx);
      hash_table_dtor(this->variable_ht);
   }

   fs_reg *variable_storage(ir_variable *var);
   int virtual_grf_alloc(int size);

   void visit(ir_variable *ir);
   void visit(ir_assignment *ir);
   void visit(ir_dereference_variable *ir);
   void visit(ir_dereference_record *ir);
   void visit(ir_dereference_array *ir);
   void visit(ir_expression *ir);
   void visit(ir_texture *ir);
   void visit(ir_if *ir);
   void visit(ir_constant *ir);
   void visit(ir_swizzle *ir);
   void visit(ir_return *ir);
   void visit(ir_loop *ir);
   void visit(ir_loop_jump *ir);
   void visit(ir_discard *ir);
   void visit(ir_call *ir);
   void visit(ir_function *ir);
   void visit(ir_function_signature *ir);

   fs_inst *emit(fs_inst inst);
   void assign_curb_setup();
   void assign_urb_setup();
   void assign_regs();
   void assign_regs_trivial();
   void generate_code();
   void generate_fb_write(fs_inst *inst);
   void generate_linterp(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src);
   void generate_math(fs_inst *inst, struct brw_reg dst, struct brw_reg *src);
   void generate_discard(fs_inst *inst, struct brw_reg temp);
   void generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src);
   void generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src);

   void emit_dummy_fs();
   void emit_fragcoord_interpolation(ir_variable *ir);
   void emit_general_interpolation(ir_variable *ir);
   void emit_interpolation_setup();
   void emit_fb_writes();

   struct brw_reg interp_reg(int location, int channel);
   int setup_uniform_values(int loc, const glsl_type *type);
   void setup_builtin_uniform_values(ir_variable *ir);

   struct brw_context *brw;
   const struct gl_fragment_program *fp;
   struct intel_context *intel;
   GLcontext *ctx;
   struct brw_wm_compile *c;
   struct brw_compile *p;
   struct brw_shader *shader;
   void *mem_ctx;
   exec_list instructions;

   int *virtual_grf_sizes;
   int virtual_grf_next;
   int virtual_grf_array_size;

   struct hash_table *variable_ht;
   ir_variable *frag_color, *frag_data, *frag_depth;
   int first_non_payload_grf;
   int grf_used;
   bool fail;

   /** @{ debug annotation info */
   const char *current_annotation;
   ir_instruction *base_ir;
   const char **annotation_string;
   ir_instruction **annotation_ir;
   /** @} */

   fs_reg pixel_x, pixel_y, wpos_w, pixel_w, delta_x, delta_y;

   /* Result of last visit() method. */
   fs_reg result;
};

int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes,
                                         int, virtual_grf_array_size);

      /* This slot is always unused. */
      virtual_grf_sizes[0] = 0;
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}
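
/* Example: the first three allocations (sizes 4, 1, 1, say a vec4 and two
 * floats) return virtual register numbers 1, 2, and 3; slot 0 is reserved
 * so that "reg == 0" can keep meaning "fixed hardware register".
 */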

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = BRW_REGISTER_TYPE_F;
}

static int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}

/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}

fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *vec_values;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      vec_values = fp->Base.Parameters->ParameterValues[loc];
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
      }
      return 1;
   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;
   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;
   default:
      assert(!"not reached");
      return 0;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const struct gl_builtin_uniform_desc *statevar = NULL;
   int array_count;

   for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
      statevar = &_mesa_builtin_uniform_desc[i];
      if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0)
         break;
   }

   if (!statevar->name) {
      this->fail = true;
      printf("Failed to find builtin uniform `%s'\n", ir->name);
      return;
   }

   if (ir->type->is_array()) {
      array_count = ir->type->length;
   } else {
      array_count = 1;
   }

   for (int a = 0; a < array_count; a++) {
      for (unsigned int i = 0; i < statevar->num_elements; i++) {
         struct gl_builtin_uniform_element *element = &statevar->elements[i];
         int tokens[STATE_LENGTH];

         memcpy(tokens, element->tokens, sizeof(element->tokens));
         if (ir->type->is_array()) {
            tokens[1] = a;
         }

         /* This state reference has already been setup by ir_to_mesa,
          * but we'll get the same index back here.
          */
         int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                               (gl_state_index *)tokens);
         float *vec_values = this->fp->Base.Parameters->ParameterValues[index];

         /* Add each of the unique swizzles of the element as a
          * parameter.  This'll end up matching the expected layout of
          * the array/matrix/structure we're trying to fill in.
          */
         int last_swiz = -1;
         for (unsigned int i = 0; i < 4; i++) {
            int this_swiz = GET_SWZ(element->swizzle, i);
            if (this_swiz == last_swiz)
               break;
            last_swiz = this_swiz;

            c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
         }
      }
   }
}
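
/* The last_swiz check above is what deduplicates splat swizzles: a state
 * element swizzled as .xxxx stops after adding one parameter, while .xyzw
 * adds all four, so the flattened layout lines up with what ir_to_mesa
 * generated for the same uniform.
 */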

void
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   fs_reg neg_y = this->pixel_y;
   neg_y.negate = true;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
   } else {
      emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (ir->origin_upper_left && ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (!ir->origin_upper_left) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 2)));
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));

   hash_table_insert(this->variable_ht, reg, ir);
}
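
/* The gl_FragCoord.y math above folds the window-origin flip into a single
 * ADD: with a lower-left origin, y' = -pixel_y + (drawable_height - 1 +
 * center_offset), which is just (height - 1) - pixel_y plus the half-pixel
 * center when pixel_center_integer isn't set.
 */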

void
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         this->fail = true;
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (!(fp->Base.InputsRead & BITFIELD64_BIT(location))) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it (since it's not used, and
             * we'd fall over later trying to find the setup data).
             */
            attr.reg_offset += type->vector_elements;
            continue;
         }

         for (unsigned int c = 0; c < type->vector_elements; c++) {
            struct brw_reg interp = interp_reg(location, c);
            emit(fs_inst(FS_OPCODE_LINTERP,
                         attr,
                         this->delta_x,
                         this->delta_y,
                         fs_reg(interp)));
            attr.reg_offset++;
         }
         attr.reg_offset -= type->vector_elements;

         for (unsigned int c = 0; c < type->vector_elements; c++) {
            emit(fs_inst(BRW_OPCODE_MUL,
                         attr,
                         attr,
                         this->pixel_w));
            attr.reg_offset++;
         }
         location++;
      }
   }

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         emit_fragcoord_interpolation(ir);
         return;
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = new(this->mem_ctx) fs_reg(this, ir->type);
         struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
         /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
          * us front face
          */
         fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
                                      *reg,
                                      fs_reg(r1_6ud),
                                      fs_reg(1u << 31)));
         inst->conditional_mod = BRW_CONDITIONAL_L;
         emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
      } else {
         emit_general_interpolation(ir);
         return;
      }
   }

   if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM ||
             (this->result.file == GRF &&
              this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
         this->fail = true;
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
      inst->predicated = true;

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
      inst->predicated = true;
      break;
   case ir_unop_rcp:
      emit(fs_inst(FS_OPCODE_RCP, this->result, op[0]));
      break;
   case ir_unop_exp2:
      emit(fs_inst(FS_OPCODE_EXP2, this->result, op[0]));
      break;
   case ir_unop_log2:
      emit(fs_inst(FS_OPCODE_LOG2, this->result, op[0]));
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
      emit(fs_inst(FS_OPCODE_SIN, this->result, op[0]));
      break;
   case ir_unop_cos:
      emit(fs_inst(FS_OPCODE_COS, this->result, op[0]));
      break;
   case ir_unop_dFdx:
      emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
      break;
   case ir_unop_dFdy:
      emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
      break;
   case ir_binop_add:
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;
   case ir_binop_mul:
      emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      break;
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;
   case ir_binop_less:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_greater:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_lequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_LE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_gequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_GE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_Z;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;

   case ir_binop_logic_xor:
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;

   case ir_binop_dot:
   case ir_binop_cross:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_unop_sqrt:
      emit(fs_inst(FS_OPCODE_SQRT, this->result, op[0]));
      break;

   case ir_unop_rsq:
      emit(fs_inst(FS_OPCODE_RSQ, this->result, op[0]));
      break;

   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
      break;
   case ir_unop_f2i:
      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      break;
   case ir_unop_trunc:
      emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = ~op[0].negate;
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
      break;

   case ir_binop_min:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_max:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;

   case ir_binop_pow:
      inst = emit(fs_inst(FS_OPCODE_POW, this->result, op[0], op[1]));
      break;

   case ir_unop_bit_not:
   case ir_binop_lshift:
   case ir_binop_rshift:
   case ir_binop_bit_and:
   case ir_binop_bit_xor:
   case ir_binop_bit_or:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
}

void
fs_visitor::visit(ir_assignment *ir)
{
   struct fs_reg l, r;
   fs_inst *inst;
   int i;
   int write_mask;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   ir->rhs->accept(this);
   r = this->result;

   /* FINISHME: This should really set to the correct maximal writemask for each
    * FINISHME: component written (in the loops below).  This case can only
    * FINISHME: occur for matrices, arrays, and structures.
    */
   if (ir->write_mask == 0) {
      assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
      write_mask = WRITEMASK_XYZW;
   } else {
      assert(ir->lhs->type->is_vector() || ir->lhs->type->is_scalar());
      write_mask = ir->write_mask;
   }

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (ir->condition) {
      /* Get the condition bool into the predicate. */
      ir->condition->accept(this);
      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null, this->result, fs_reg(0)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }

   for (i = 0; i < type_size(ir->lhs->type); i++) {
      if (i >= 4 || (write_mask & (1 << i))) {
         inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
         if (ir->condition)
            inst->predicated = true;
         r.reg_offset++;
      }
      l.reg_offset++;
   }
}

void
fs_visitor::visit(ir_texture *ir)
{
   int base_mrf = 2;
   fs_inst *inst = NULL;
   unsigned int mlen = 0;

   ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->projector) {
      fs_reg inv_proj = fs_reg(this, glsl_type::float_type);

      ir->projector->accept(this);
      emit(fs_inst(FS_OPCODE_RCP, inv_proj, this->result));

      fs_reg proj_coordinate = fs_reg(this, ir->coordinate->type);
      for (unsigned int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MUL, proj_coordinate, coordinate, inv_proj));
         coordinate.reg_offset++;
         proj_coordinate.reg_offset++;
      }
      proj_coordinate.reg_offset = 0;

      coordinate = proj_coordinate;
   }

   for (mlen = 0; mlen < ir->coordinate->type->vector_elements; mlen++) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate));
      coordinate.reg_offset++;
   }

   /* Pre-Ironlake, the 8-wide sampler always took u,v,r. */
   if (intel->gen < 5)
      mlen = 3;

   if (ir->shadow_comparitor) {
      /* For shadow comparisons, we have to supply u,v,r. */
      mlen = 3;

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   }

   /* Do we ever want to handle writemasking on texture samples?  Is it
    * performance relevant?
    */
   fs_reg dst = fs_reg(this, glsl_type::vec4_type);

   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXB, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXL, dst, fs_reg(MRF, base_mrf)));
      break;
   case ir_txd:
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }

   inst->sampler =
      _mesa_get_sampler_uniform_value(ir->sampler,
                                      ctx->Shader.CurrentProgram,
                                      &brw->fragment_program->Base);
   inst->sampler = c->fp->program.Base.SamplerUnits[inst->sampler];

   this->result = dst;

   if (ir->shadow_comparitor)
      inst->shadow_compare = true;

   inst->mlen = mlen;
}

void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(fs_inst(BRW_OPCODE_MOV, result, channel));
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   fs_reg temp = fs_reg(this, glsl_type::uint_type);

   assert(ir->condition == NULL); /* FINISHME */

   emit(fs_inst(FS_OPCODE_DISCARD, temp, temp));
}

void
fs_visitor::visit(ir_constant *ir)
{
   fs_reg reg(this, ir->type);
   this->result = reg;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_INT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg((int)ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
      }
      reg.reg_offset++;
   }
}

void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   /* Generate the condition into the condition code. */
   ir->condition->accept(this);
   inst = emit(fs_inst(BRW_OPCODE_MOV, fs_reg(brw_null_reg()), this->result));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   inst = emit(fs_inst(BRW_OPCODE_IF));
   inst->predicated = true;

   foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(fs_inst(BRW_OPCODE_ELSE));

      foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(fs_inst(BRW_OPCODE_ENDIF));
}

void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;
   fs_inst *inst;

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
      }
   }

   /* Start a safety counter.  If the user messed up their loop
    * counting, we don't want to hang the GPU.
    */
   fs_reg max_iter = fs_reg(this, glsl_type::int_type);
   emit(fs_inst(BRW_OPCODE_MOV, max_iter, fs_reg(10000)));

   emit(fs_inst(BRW_OPCODE_DO));

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null,
                          counter, this->result));
      switch (ir->cmp) {
      case ir_binop_equal:
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      case ir_binop_gequal:
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_lequal:
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_greater:
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_less:
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      default:
         assert(!"not reached: unknown loop condition");
         break;
      }

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();

      this->base_ir = ir;
      ir->accept(this);
   }

   /* Check the maximum loop iters counter. */
   inst = emit(fs_inst(BRW_OPCODE_ADD, max_iter, max_iter, fs_reg(-1)));
   inst->conditional_mod = BRW_CONDITIONAL_Z;

   inst = emit(fs_inst(BRW_OPCODE_BREAK));
   inst->predicated = true;

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
   }

   emit(fs_inst(BRW_OPCODE_WHILE));
}
*ir
)
1468 case ir_loop_jump::jump_break
:
1469 emit(fs_inst(BRW_OPCODE_BREAK
));
1471 case ir_loop_jump::jump_continue
:
1472 emit(fs_inst(BRW_OPCODE_CONTINUE
));
1478 fs_visitor::visit(ir_call
*ir
)
1480 assert(!"FINISHME");
1484 fs_visitor::visit(ir_return
*ir
)
1486 assert(!"FINISHME");
1490 fs_visitor::visit(ir_function
*ir
)
1492 /* Ignore function bodies other than main() -- we shouldn't see calls to
1493 * them since they should all be inlined before we get to ir_to_mesa.
1495 if (strcmp(ir
->name
, "main") == 0) {
1496 const ir_function_signature
*sig
;
1499 sig
= ir
->matching_signature(&empty
);
1503 foreach_iter(exec_list_iterator
, iter
, sig
->body
) {
1504 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
1513 fs_visitor::visit(ir_function_signature
*ir
)
1515 assert(!"not reached");

fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 2),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 3),
                fs_reg(0.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 4),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 5),
                fs_reg(0.0f)));

   fs_inst *write;
   write = emit(fs_inst(FS_OPCODE_FB_WRITE,
                        fs_reg(0),
                        fs_reg(0)));
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = location * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   return brw_vec1_grf(regnr, stride);
}
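
/* Worked example: each attribute's four setup channels take two registers
 * (each channel is half a register of plane coefficients), so location 1,
 * channel 3 lands at regnr = 1 * 2 + 3 / 2 = 3 with a 4-float suboffset,
 * i.e. the second half of that register.
 */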

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   this->current_annotation = "compute pixel deltas from v0";
   this->delta_x = fs_reg(this, glsl_type::float_type);
   this->delta_y = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_x,
                this->pixel_x,
                fs_reg(negate(brw_vec1_grf(1, 0)))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_y,
                this->pixel_y,
                fs_reg(negate(brw_vec1_grf(1, 1)))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 3)));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_RCP, this->pixel_w, wpos_w));
   this->current_annotation = NULL;
}
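
/* The brw_imm_v() arguments above are vector immediates: eight packed 4-bit
 * values, one per channel.  0x10101010 adds <0,1,0,1,0,1,0,1> to the
 * per-subspan X coordinates in g1 and 0x11001100 adds <0,0,1,1,0,0,1,1> to
 * the Y coordinates, producing the pixel centers for the two 2x2 subspans
 * of an 8-wide dispatch.
 */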

void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   /* The header is 2 message registers (m0, m1); see generate_fb_write(). */
   int nr = 2;

   if (c->key.aa_dest_stencil_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.aa_dest_stencil_reg, 0))));
   }

   /* Reserve space for color. It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4;

   if (c->key.source_depth_to_render_target) {
      if (c->key.computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
      } else {
         /* Pass through the payload depth. */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                      fs_reg(brw_vec8_grf(c->key.source_depth_reg, 0))));
      }
   }

   if (c->key.dest_depth_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.dest_depth_reg, 0))));
   }

   fs_reg color = reg_undef;
   if (this->frag_color)
      color = *(variable_storage(this->frag_color));
   else if (this->frag_data)
      color = *(variable_storage(this->frag_data));

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = talloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      if (this->frag_color || this->frag_data) {
         for (int i = 0; i < 4; i++) {
            emit(fs_inst(BRW_OPCODE_MOV,
                         fs_reg(MRF, color_mrf + i),
                         color));
            color.reg_offset++;
         }
      }

      if (this->frag_color)
         color.reg_offset -= 4;

      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->target = target;
      inst->mlen = nr;

      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
   }

   if (c->key.nr_color_regions == 0) {
      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->mlen = nr;
      inst->eot = true;
   }

   this->current_annotation = NULL;
}

void
fs_visitor::generate_fb_write(fs_inst *inst)
{
   GLboolean eot = inst->eot;

   /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
    * by the send instruction.
    */
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_MOV(p,
           brw_message_reg(1),
           brw_vec8_grf(1, 0));
   brw_pop_insn_state(p);

   brw_fb_WRITE(p,
                8, /* dispatch_width */
                retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
                0, /* base MRF */
                retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
                inst->target,
                inst->mlen,
                0, /* response length */
                eot);
}

void
fs_visitor::generate_linterp(fs_inst *inst,
                             struct brw_reg dst, struct brw_reg *src)
{
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = src[1];
   struct brw_reg interp = src[2];

   if (brw->has_pln &&
       delta_y.nr == delta_x.nr + 1 &&
       (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
      brw_PLN(p, dst, interp, delta_x);
   } else {
      brw_LINE(p, brw_null_reg(), interp, delta_x);
      brw_MAC(p, dst, suboffset(interp, 1), delta_y);
   }
}
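
/* Both paths evaluate the same attribute plane equation: PLN computes
 * dst = a0 * delta_x + a1 * delta_y + a3 in a single instruction, while the
 * LINE (acc = a0 * delta_x + a3) plus MAC (dst = acc + a1 * delta_y)
 * fallback covers hardware or operand placements where PLN can't be used.
 */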

void
fs_visitor::generate_math(fs_inst *inst,
                          struct brw_reg dst, struct brw_reg *src)
{
   int op;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
      op = BRW_MATH_FUNCTION_INV;
      break;
   case FS_OPCODE_RSQ:
      op = BRW_MATH_FUNCTION_RSQ;
      break;
   case FS_OPCODE_SQRT:
      op = BRW_MATH_FUNCTION_SQRT;
      break;
   case FS_OPCODE_EXP2:
      op = BRW_MATH_FUNCTION_EXP;
      break;
   case FS_OPCODE_LOG2:
      op = BRW_MATH_FUNCTION_LOG;
      break;
   case FS_OPCODE_POW:
      op = BRW_MATH_FUNCTION_POW;
      break;
   case FS_OPCODE_SIN:
      op = BRW_MATH_FUNCTION_SIN;
      break;
   case FS_OPCODE_COS:
      op = BRW_MATH_FUNCTION_COS;
      break;
   default:
      assert(!"not reached: unknown math function");
      op = 0;
      break;
   }

   if (inst->opcode == FS_OPCODE_POW) {
      brw_MOV(p, brw_message_reg(3), src[1]);
   }

   brw_math(p, dst,
            op,
            inst->saturate ? BRW_MATH_SATURATE_SATURATE :
            BRW_MATH_SATURATE_NONE,
            2, src[0],
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

void
fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   int msg_type = -1;

   if (intel->gen == 5) {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
         }
         break;
      }
   } else {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         /* Note that G45 and older determines shadow compare and dispatch width
          * from message length for most messages.
          */
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(!"FINISHME: shadow compare with bias.");
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
         }
         break;
      }
   }
   assert(msg_type != -1);

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              2, /* msg reg: matches base_mrf in visit(ir_texture) */
              retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
              SURF_INDEX_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              4, /* response length */
              inst->mlen + 1, /* message length, including the g0 header */
              0, /* eot */
              1, /* header present */
              BRW_SAMPLER_SIMD_MODE_SIMD8);
}

/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * and we're trying to produce:
 *
 *              DDX                     DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But for DDY, it's harder, as we want to produce the pairs swizzled
 * between each other.  We could probably do it like ddx and swizzle the right
 * order later, but bail for now and just produce
 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
 */
void
fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}

void
fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}

void
fs_visitor::generate_discard(fs_inst *inst, struct brw_reg temp)
{
   struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
   temp = brw_uw1_reg(temp.file, temp.nr, 0);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_NOT(p, temp, brw_mask_reg(1)); /* IMASK */
   brw_AND(p, g0, temp, g0);
   brw_pop_insn_state(p);
}

void
fs_visitor::assign_curb_setup()
{
   c->prog_data.first_curbe_grf = c->key.nr_payload_regs;
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;

   if (intel->gen == 5 && (c->prog_data.first_curbe_grf +
                           c->prog_data.curb_read_length) & 1) {
      /* Align the start of the interpolation coefficients so that we can use
       * the PLN instruction.
       */
      c->prog_data.first_curbe_grf++;
   }

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = brw_reg;
         }
      }
   }
}
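
/* Worked example: with nr_params == 10, curb_read_length is
 * ALIGN(10, 8) / 8 = 2 registers of push constants.  Uniform slot 9 then
 * maps to brw_vec1_grf(first_curbe_grf + 9 / 8, 9 % 8), i.e. the second
 * CURBE register, subregister 1, since 8 floats fit per register.
 */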

void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;
   int interp_reg_nr[FRAG_ATTRIB_MAX];

   c->prog_data.urb_read_length = 0;

   /* Figure out where each of the incoming setup attributes lands. */
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      interp_reg_nr[i] = -1;

      if (i != FRAG_ATTRIB_WPOS &&
          !(brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)))
         continue;

      /* Each attribute is 4 setup channels, each of which is half a reg. */
      interp_reg_nr[i] = urb_start + c->prog_data.urb_read_length;
      c->prog_data.urb_read_length += 2;
   }

   /* Map the register numbers for FS_OPCODE_LINTERP so that it uses
    * the correct setup input.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != FS_OPCODE_LINTERP)
         continue;

      assert(inst->src[2].file == FIXED_HW_REG);

      int location = inst->src[2].fixed_hw_reg.nr / 2;
      assert(interp_reg_nr[location] != -1);
      inst->src[2].fixed_hw_reg.nr = (interp_reg_nr[location] +
                                      (inst->src[2].fixed_hw_reg.nr & 1));
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}

static void
assign_reg(int *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == GRF && reg->reg != 0) {
      reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg = 0;
   }
}

void
fs_visitor::assign_regs_trivial()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next];
   int i;

   hw_reg_mapping[0] = 0;
   hw_reg_mapping[1] = this->first_non_payload_grf;
   for (i = 2; i < this->virtual_grf_next; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1]);
   }
   last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}
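
/* This trivial allocator just lays the virtual GRFs out end-to-end after the
 * payload, with no notion of live ranges, so a shader with many temporaries
 * exhausts the register file quickly; assign_regs() below is the
 * graph-coloring replacement for it.
 */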

void
fs_visitor::assign_regs()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next + 1];
   int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf;
   int class_sizes[base_reg_count];
   int class_count = 0;

   /* Set up the register classes.
    *
    * The base registers store a scalar value.  For texture samples,
    * we get virtual GRFs composed of 4 contiguous hw register.  For
    * structures and arrays, we store them as contiguous larger things
    * than that, though we should be able to do better most of the
    * time.
    */
   class_sizes[class_count++] = 1;
   for (int r = 1; r < this->virtual_grf_next; r++) {
      int i;

      for (i = 0; i < class_count; i++) {
         if (class_sizes[i] == this->virtual_grf_sizes[r])
            break;
      }
      if (i == class_count) {
         class_sizes[class_count++] = this->virtual_grf_sizes[r];
      }
   }

   int ra_reg_count = 0;
   int class_base_reg[class_count];
   int class_reg_count[class_count];
   int classes[class_count];

   for (int i = 0; i < class_count; i++) {
      class_base_reg[i] = ra_reg_count;
      class_reg_count[i] = base_reg_count - (class_sizes[i] - 1);
      ra_reg_count += class_reg_count[i];
   }

   struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count);
   for (int i = 0; i < class_count; i++) {
      classes[i] = ra_alloc_reg_class(regs);

      for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
         ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r);
      }

      /* Add conflicts between our contiguous registers aliasing
       * base regs and other register classes' contiguous registers
       * that alias base regs, or the base regs themselves for classes[0].
       */
      for (int c = 0; c <= i; c++) {
         for (int i_r = 0; i_r < class_reg_count[i] - 1; i_r++) {
            for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1));
                 c_r <= MIN2(class_reg_count[c] - 1, i_r + class_sizes[i] - 1);
                 c_r++) {

               if (0) {
                  printf("%d/%d conflicts %d/%d\n",
                         class_sizes[i], i_r,
                         class_sizes[c], c_r);
               }

               ra_add_reg_conflict(regs,
                                   class_base_reg[i] + i_r,
                                   class_base_reg[c] + c_r);
            }
         }
      }
   }

   ra_set_finalize(regs);

   struct ra_graph *g = ra_alloc_interference_graph(regs,
                                                    this->virtual_grf_next);
   /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
    * with nodes.
    */
   ra_set_node_class(g, 0, classes[0]);

   /* FINISHME: Proper interference (live interval analysis) */
   for (int i = 1; i < this->virtual_grf_next; i++) {
      for (int c = 0; c < class_count; c++) {
         if (class_sizes[c] == this->virtual_grf_sizes[i]) {
            ra_set_node_class(g, i, classes[c]);
            break;
         }
      }

      for (int j = 1; j < i; j++) {
         ra_add_node_interference(g, i, j);
      }
   }

   /* FINISHME: Handle spilling */
   if (!ra_allocate_no_spills(g)) {
      fprintf(stderr, "Failed to allocate registers.\n");
      this->fail = true;
      return;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   hw_reg_mapping[0] = 0; /* unused */
   for (int i = 1; i < this->virtual_grf_next; i++) {
      int reg = ra_get_node_reg(g, i);
      int hw_reg = -1;

      for (int c = 0; c < class_count; c++) {
         if (reg >= class_base_reg[c] &&
             reg < class_base_reg[c] + class_reg_count[c] - 1) {
            hw_reg = reg - class_base_reg[c];
            break;
         }
      }

      assert(hw_reg != -1);
      hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
      last_grf = MAX2(last_grf,
                      hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}

static struct brw_reg
brw_reg_from_fs_reg(fs_reg *reg)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case GRF:
   case ARF:
   case MRF:
      brw_reg = brw_vec8_reg(reg->file,
                             reg->hw_reg, 0);
      brw_reg = retype(brw_reg, reg->type);
      break;
   case IMM:
      switch (reg->type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(reg->imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(reg->imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(reg->imm.u);
         break;
      default:
         assert(!"not reached");
         break;
      }
      break;
   case FIXED_HW_REG:
      brw_reg = reg->fixed_hw_reg;
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case UNIFORM:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   if (reg->abs)
      brw_reg = brw_abs(brw_reg);
   if (reg->negate)
      brw_reg = negate(brw_reg);

   return brw_reg;
}

void
fs_visitor::generate_code()
{
   unsigned int annotation_len = 0;
   int last_native_inst = 0;
   struct brw_instruction *if_stack[16], *loop_stack[16];
   int if_stack_depth = 0, loop_stack_depth = 0;
   int if_depth_in_loop[16];

   if_depth_in_loop[loop_stack_depth] = 0;

   memset(&if_stack, 0, sizeof(if_stack));
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();
      struct brw_reg src[3], dst;

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = brw_reg_from_fs_reg(&inst->src[i]);
      }
      dst = brw_reg_from_fs_reg(&inst->dst);

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicated);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         assert(if_stack_depth < 16);
         if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_stack_depth]++;
         if_stack_depth++;
         break;
      case BRW_OPCODE_ELSE:
         if_stack[if_stack_depth - 1] =
            brw_ELSE(p, if_stack[if_stack_depth - 1]);
         break;
      case BRW_OPCODE_ENDIF:
         if_stack_depth--;
         brw_ENDIF(p, if_stack[if_stack_depth]);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen == 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         /* patch all the BREAK/CONT instructions from last BGNLOOP */
         while (inst0 > loop_stack[loop_stack_depth]) {
            inst0--;
            if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                inst0->bits3.if_else.jump_count == 0) {
               inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
            }
            else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                     inst0->bits3.if_else.jump_count == 0) {
               inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
            }
         }
      }
         break;

      case FS_OPCODE_RCP:
      case FS_OPCODE_RSQ:
      case FS_OPCODE_SQRT:
      case FS_OPCODE_EXP2:
      case FS_OPCODE_LOG2:
      case FS_OPCODE_POW:
      case FS_OPCODE_SIN:
      case FS_OPCODE_COS:
         generate_math(inst, dst, src);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case FS_OPCODE_TXL:
         generate_tex(inst, dst, src[0]);
         break;
      case FS_OPCODE_DISCARD:
         generate_discard(inst, dst /* src0 == dst */);
         break;
      case FS_OPCODE_DDX:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
      default:
         if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
            _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
                          brw_opcodes[inst->opcode].name);
         } else {
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         break;
      }

      if (annotation_len < p->nr_insn) {
         annotation_len *= 2;
         if (annotation_len < 16)
            annotation_len = 16;

         this->annotation_string = talloc_realloc(this->mem_ctx,
                                                  this->annotation_string,
                                                  const char *,
                                                  annotation_len);
         this->annotation_ir = talloc_realloc(this->mem_ctx,
                                              this->annotation_ir,
                                              ir_instruction *,
                                              annotation_len);
      }

      for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
         this->annotation_string[i] = inst->annotation;
         this->annotation_ir[i] = inst->ir;
      }
      last_native_inst = p->nr_insn;
   }
}

GLboolean
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;
   struct brw_shader *shader = NULL;
   struct gl_shader_program *prog = ctx->Shader.CurrentProgram;

   if (!prog)
      return GL_FALSE;

   if (!using_new_fs)
      return GL_FALSE;

   for (unsigned int i = 0; i < prog->_NumLinkedShaders; i++) {
      if (prog->_LinkedShaders[i]->Type == GL_FRAGMENT_SHADER) {
         shader = (struct brw_shader *)prog->_LinkedShaders[i];
         break;
      }
   }
   if (!shader)
      return GL_FALSE;

   /* We always use 8-wide mode, at least for now.  For one, flow
    * control only works in 8-wide.  Also, when we're fragment shader
    * bound, we're almost always under register pressure as well, so
    * 8-wide would save us from the performance cliff of spilling
    * regs.
    */
   c->dispatch_width = 8;

   if (INTEL_DEBUG & DEBUG_WM) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   fs_visitor v(c, shader);

   if (0) {
      v.emit_dummy_fs();
   } else {
      v.emit_interpolation_setup();

      /* Generate FS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      foreach_iter(exec_list_iterator, iter, *shader->ir) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         v.base_ir = ir;
         ir->accept(&v);
      }

      v.emit_fb_writes();
   }

   v.assign_curb_setup();
   v.assign_urb_setup();
   if (0)
      v.assign_regs_trivial();
   else
      v.assign_regs();

   v.generate_code();

   assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */

   if (INTEL_DEBUG & DEBUG_WM) {
      const char *last_annotation_string = NULL;
      ir_instruction *last_annotation_ir = NULL;

      printf("Native code for fragment shader %d:\n", prog->Name);
      for (unsigned int i = 0; i < p->nr_insn; i++) {
         if (last_annotation_ir != v.annotation_ir[i]) {
            last_annotation_ir = v.annotation_ir[i];
            if (last_annotation_ir) {
               printf("   ");
               last_annotation_ir->print();
               printf("\n");
            }
         }
         if (last_annotation_string != v.annotation_string[i]) {
            last_annotation_string = v.annotation_string[i];
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
         brw_disasm(stdout, &p->store[i], intel->gen);
      }
      printf("\n");
   }

   c->prog_data.total_grf = v.grf_used;
   c->prog_data.total_scratch = 0;

   return GL_TRUE;
}