/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"

#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"
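/* This file implements the scalar fragment shader backend for i965: the
 * fs_visitor walks the linked GLSL IR and lowers it to fs_inst instructions,
 * then assigns hardware registers and generates native code.
 */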
static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = talloc_zero(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;

   prog = talloc_zero(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }

   return &prog->base;
}
GLboolean
brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
{
   if (!_mesa_ir_compile_shader(ctx, shader))
      return GL_FALSE;

   return GL_TRUE;
}
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct intel_context *intel = intel_context(ctx);

   struct brw_shader *shader =
      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (shader) {
      void *mem_ctx = talloc_new(NULL);
      bool progress;

      if (shader->ir)
         talloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      do_mat_op_to_vec(shader->ir);
      do_mod_to_fract(shader->ir);
      do_div_to_mul_rcp(shader->ir);
      do_sub_to_add_neg(shader->ir);
      do_explog_to_explog2(shader->ir);
      do_lower_texture_projection(shader->ir);
      brw_do_cubemap_normalize(shader->ir);

      do {
         progress = false;

         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, 32) || progress;

         progress = lower_noise(shader->ir) || progress;
         progress =
            lower_variable_index_to_cond_assign(shader->ir,
                                                GL_TRUE, /* input */
                                                GL_TRUE, /* output */
                                                GL_TRUE, /* temp */
                                                GL_TRUE /* uniform */
                                                ) || progress;
         if (intel->gen == 6) {
            progress = do_if_to_cond_assign(shader->ir) || progress;
         }
      } while (progress);

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      talloc_free(mem_ctx);
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}
static int
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}
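/* For reference: type_size(float) == 1 and type_size(vec4) == 4, an array
 * multiplies its element size by its length, and a struct is the sum of its
 * members, so e.g. a struct of a vec3 and a float occupies 4 scalar slots.
 */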
static const fs_reg reg_undef;
static const fs_reg reg_null_f(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_F);
static const fs_reg reg_null_d(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_D);
int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = talloc_realloc(mem_ctx, virtual_grf_sizes,
                                         int, virtual_grf_array_size);

      /* This slot is always unused. */
      virtual_grf_sizes[0] = 0;
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}
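/* Note the doubling growth of virtual_grf_sizes[] above, and that slot 0 is
 * never handed out: a GRF number of 0 can then safely mean "not a virtual
 * GRF" elsewhere (see assign_reg()'s reg->reg != 0 check below).
 */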
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg)
{
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = BRW_REGISTER_TYPE_F;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
{
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = type;
}
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}
/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}
fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}
/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *vec_values;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      vec_values = fp->Base.Parameters->ParameterValues[loc];
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
      }
      return 1;
   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;
   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;
   default:
      assert(!"not reached");
      return 0;
   }
}
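/* As an example of the recursion above, a mat4 uniform is handled as four
 * vec4 columns, each of which appends four pointers into ParameterValues[],
 * so it consumes 16 entries of c->prog_data.param[].
 */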
/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const struct gl_builtin_uniform_desc *statevar = NULL;

   for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
      statevar = &_mesa_builtin_uniform_desc[i];
      if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0)
         break;
   }

   if (!statevar->name) {
      this->fail = true;
      printf("Failed to find builtin uniform `%s'\n", ir->name);
      return;
   }

   int array_count;
   if (ir->type->is_array()) {
      array_count = ir->type->length;
   } else {
      array_count = 1;
   }

   for (int a = 0; a < array_count; a++) {
      for (unsigned int i = 0; i < statevar->num_elements; i++) {
         struct gl_builtin_uniform_element *element = &statevar->elements[i];
         int tokens[STATE_LENGTH];

         memcpy(tokens, element->tokens, sizeof(element->tokens));
         if (ir->type->is_array()) {
            tokens[1] = a;
         }

         /* This state reference has already been setup by ir_to_mesa,
          * but we'll get the same index back here.
          */
         int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                               (gl_state_index *)tokens);
         float *vec_values = this->fp->Base.Parameters->ParameterValues[index];

         /* Add each of the unique swizzles of the element as a
          * parameter.  This'll end up matching the expected layout of
          * the array/matrix/structure we're trying to fill in.
          */
         int last_swiz = -1;
         for (unsigned int i = 0; i < 4; i++) {
            int swiz = GET_SWZ(element->swizzle, i);
            if (swiz == last_swiz)
               break;
            last_swiz = swiz;

            c->prog_data.param[c->prog_data.nr_params++] = &vec_values[swiz];
         }
      }
   }
}
fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   fs_reg neg_y = this->pixel_y;
   neg_y.negate = true;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
   } else {
      emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (ir->origin_upper_left && ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (!ir->origin_upper_left) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 2)));
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));

   return reg;
}
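/* The gl_FragCoord.y math above folds the origin conversion into one ADD:
 * for a lower-left origin we negate pixel_y and add (drawable_height - 1.0),
 * computing y' = (height - 1) - y, plus the usual 0.5 center offset when
 * pixel centers aren't integer.
 */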
fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         this->fail = true;
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         for (unsigned int c = 0; c < type->vector_elements; c++) {
            struct brw_reg interp = interp_reg(location, c);
            emit(fs_inst(FS_OPCODE_LINTERP,
                         attr,
                         this->delta_x,
                         this->delta_y,
                         fs_reg(interp)));
            attr.reg_offset++;
         }

         if (intel->gen < 6) {
            attr.reg_offset -= type->vector_elements;
            for (unsigned int c = 0; c < type->vector_elements; c++) {
               emit(fs_inst(BRW_OPCODE_MUL,
                            attr,
                            attr,
                            this->pixel_w));
               attr.reg_offset++;
            }
         }
         location++;
      }
   }

   return reg;
}
fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(fs_inst(BRW_OPCODE_ASR,
                   *reg,
                   fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
                   fs_reg(15)));
      emit(fs_inst(BRW_OPCODE_NOT,
                   *reg,
                   *reg));
      emit(fs_inst(BRW_OPCODE_AND,
                   *reg,
                   *reg,
                   fs_reg(1)));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front face ourselves.
       */
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
                                   *reg,
                                   fs_reg(r1_6ud),
                                   fs_reg(1u << 31)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
   }

   return reg;
}
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
{
   int base_mrf = 2;

   switch (opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out. We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    */
   if (intel->gen >= 6 && src.file == UNIFORM) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(fs_inst(BRW_OPCODE_MOV, expanded, src));
      src = expanded;
   }

   fs_inst *inst = emit(fs_inst(opcode, dst, src));

   if (intel->gen < 6) {
      inst->base_mrf = base_mrf;
      inst->mlen = 1;
   }

   return inst;
}
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   int base_mrf = 2;
   fs_inst *inst;

   assert(opcode == FS_OPCODE_POW);

   if (intel->gen >= 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out. */
      if (src0.file == UNIFORM) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src0));
         src0 = expanded;
      }

      if (src1.file == UNIFORM) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src1));
         src1 = expanded;
      }

      inst = emit(fs_inst(opcode, dst, src0, src1));
   } else {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1));
      inst = emit(fs_inst(opcode, dst, src0, reg_null_f));

      inst->base_mrf = base_mrf;
   }

   return inst;
}
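/* On gen4/5 these math ops are message-passing sends through an MRF (hence
 * base_mrf above), while gen6+ has a native scalar math instruction, which
 * is why the two-source POW only builds an MRF payload on the pre-gen6 path.
 */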
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   }

   if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}
void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}
void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}
void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM ||
             (this->result.file == GRF &&
              this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}
void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
         this->fail = true;
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
      inst->predicated = true;

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(FS_OPCODE_RCP, this->result, op[0]);
      break;
   case ir_unop_exp2:
      emit_math(FS_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(FS_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
      emit_math(FS_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
      emit_math(FS_OPCODE_COS, this->result, op[0]);
      break;
   case ir_unop_dFdx:
      emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
      break;
   case ir_unop_dFdy:
      emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
      break;
   case ir_binop_add:
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;
   case ir_binop_mul:
      emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      break;
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;
   case ir_binop_less:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_greater:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_lequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_LE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_gequal:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_GE;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as nequal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_Z;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_logic_xor:
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;
   case ir_binop_logic_or:
      emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;
   case ir_binop_logic_and:
      emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;
   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;
   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;
   case ir_unop_sqrt:
      emit_math(FS_OPCODE_SQRT, this->result, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(FS_OPCODE_RSQ, this->result, op[0]);
      break;
   case ir_unop_i2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(fs_inst(BRW_OPCODE_AND, this->result,
                          this->result, fs_reg(1)));
      break;
   case ir_unop_trunc:
      emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
      break;
   case ir_unop_round_even:
      emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0]));
      break;
   case ir_binop_min:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_max:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_pow:
      emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
      break;
   case ir_unop_bit_not:
   case ir_binop_lshift:
   case ir_binop_rshift:
   case ir_binop_bit_and:
   case ir_binop_bit_xor:
   case ir_binop_bit_or:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
}
void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
         inst->predicated = predicated;

         l.reg_offset++;
         r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}
void
fs_visitor::visit(ir_assignment *ir)
{
   struct fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   ir->rhs->accept(this);
   r = this->result;

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (ir->condition) {
      /* Get the condition bool into the predicate. */
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   int mlen = 1; /* g0 header. */
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   if (ir->shadow_comparitor) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      fs_reg(0.0f)));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      assert(ir->op == ir_txb || ir->op == ir_txl);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
                      coordinate));
         coordinate.reg_offset++;
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      /* The unused upper half. */
      mlen++;

      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk. We'll need to move
       * this weirdness around to the expected layout.
       */
      simd16 = true;
      orig_dst = dst;
      dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type,
                                                       2));
      dst.type = BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   default:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, orig_dst, dst));
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}
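/* To illustrate the SIMD16 fixup above: the sampler returns the two SIMD8
 * halves interleaved, so the copy loop reads every other vec4 of the return
 * (dst.reg_offset += 2), skipping the junk odd halves, to pack the four
 * channels we actually wanted back into orig_dst.
 */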
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   /* gen5's SIMD8 sampler has slots for u, v, r, array index, then
    * optional parameters like shadow comparitor or LOD bias.  If
    * optional parameters aren't present, those base slots are
    * optional and don't need to be included in the message.
    *
    * We don't fill in the unnecessary slots regardless, which may
    * look surprising in the disassembly.
    */
   int mlen = 1; /* g0 header always present. */
   int base_mrf = 1;

   for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                   coordinate));
      coordinate.reg_offset++;
   }
   mlen += ir->coordinate->type->vector_elements;

   if (ir->shadow_comparitor) {
      mlen = MAX2(mlen, 5);

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   default:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   return inst;
}
void
fs_visitor::visit(ir_texture *ir)
{
   int sampler;
   fs_inst *inst = NULL;

   ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   sampler = _mesa_get_sampler_uniform_value(ir->sampler,
                                             ctx->Shader.CurrentProgram,
                                             &brw->fragment_program->Base);
   sampler = c->fp->program.Base.SamplerUnits[sampler];

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);
      float *vec_values = this->fp->Base.Parameters->ParameterValues[index];

      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[0];
      c->prog_data.param[c->prog_data.nr_params++] = &vec_values[1];

      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x));
      dst.reg_offset++;
      src.reg_offset++;
      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y));
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::vec4_type);

   if (intel->gen < 5) {
      inst = emit_texture_gen4(ir, dst, coordinate);
   } else {
      inst = emit_texture_gen5(ir, dst, coordinate);
   }

   inst->sampler = sampler;

   this->result = dst;

   if (ir->shadow_comparitor)
      inst->shadow_compare = true;

   if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) {
      fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
         fs_reg l = swizzle_dst;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(0.0f)));
         } else if (swiz == SWIZZLE_ONE) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(1.0f)));
         } else {
            fs_reg r = dst;
            r.reg_offset += GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
            emit(fs_inst(BRW_OPCODE_MOV, l, r));
         }
      }
      this->result = swizzle_dst;
   }
}
void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(fs_inst(BRW_OPCODE_MOV, result, channel));
      result.reg_offset++;
   }
}
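/* For example, a .zzxy swizzle of a vec4 emits four MOVs reading from
 * reg_offsets 2, 2, 0, 1 of the source value into consecutive channels of
 * the result, while a single-channel swizzle just bumps reg_offset in place.
 */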
void
fs_visitor::visit(ir_discard *ir)
{
   fs_reg temp = fs_reg(this, glsl_type::uint_type);

   assert(ir->condition == NULL); /* FINISHME */

   emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d));
   emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp));
   kill_emitted = true;
}
void
fs_visitor::visit(ir_constant *ir)
{
   fs_reg reg(this, ir->type);
   this->result = reg;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_INT:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(fs_inst(BRW_OPCODE_MOV, reg, fs_reg((int)ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
         break;
      }
      reg.reg_offset++;
   }
}
void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                op[0], fs_reg(0.0f)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_gequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_less:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      case ir_binop_lequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_equal:
      case ir_binop_all_equal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      default:
         assert(!"not reached");
         this->fail = true;
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d,
                                   this->result, fs_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}
void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   emit_bool_to_cond_code(ir->condition);

   inst = emit(fs_inst(BRW_OPCODE_IF));
   inst->predicated = true;

   foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(fs_inst(BRW_OPCODE_ELSE));

      foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(fs_inst(BRW_OPCODE_ENDIF));
}
void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
      }
   }

   emit(fs_inst(BRW_OPCODE_DO));

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                   counter, this->result));
      switch (ir->cmp) {
      case ir_binop_equal:
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;
      case ir_binop_nequal:
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;
      case ir_binop_gequal:
         inst->conditional_mod = BRW_CONDITIONAL_GE;
         break;
      case ir_binop_lequal:
         inst->conditional_mod = BRW_CONDITIONAL_LE;
         break;
      case ir_binop_greater:
         inst->conditional_mod = BRW_CONDITIONAL_G;
         break;
      case ir_binop_less:
         inst->conditional_mod = BRW_CONDITIONAL_L;
         break;
      default:
         assert(!"not reached: unknown loop condition");
         this->fail = true;
         break;
      }

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();

      this->base_ir = ir;
      ir->accept(this);
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
   }

   emit(fs_inst(BRW_OPCODE_WHILE));
}
void
fs_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(fs_inst(BRW_OPCODE_BREAK));
      break;
   case ir_loop_jump::jump_continue:
      emit(fs_inst(BRW_OPCODE_CONTINUE));
      break;
   }
}
void
fs_visitor::visit(ir_call *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_return *ir)
{
   assert(!"FINISHME");
}
void
fs_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to ir_to_mesa.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_iter(exec_list_iterator, iter, sig->body) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}

void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
}
fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}
/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 2),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 3),
                fs_reg(0.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 4),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 5),
                fs_reg(0.0f)));

   fs_inst *write;
   write = emit(fs_inst(FS_OPCODE_FB_WRITE,
                        fs_reg(0),
                        fs_reg(0)));
   write->base_mrf = 0;
}
/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}
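/* Worked example: each attribute's four setup channels occupy two GRFs
 * (half a reg per channel), so channel 3 of the attribute in URB slot 0
 * lands at regnr = 0 * 2 + 3 / 2 = 1 with a subreg offset of
 * (3 & 1) * 4 floats.
 */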
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   this->current_annotation = "compute pixel deltas from v0";
   if (brw->has_pln) {
      this->delta_x = fs_reg(this, glsl_type::vec2_type);
      this->delta_y = this->delta_x;
      this->delta_y.reg_offset++;
   } else {
      this->delta_x = fs_reg(this, glsl_type::float_type);
      this->delta_y = fs_reg(this, glsl_type::float_type);
   }
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_x,
                this->pixel_x,
                fs_reg(negate(brw_vec1_grf(1, 0)))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_y,
                this->pixel_y,
                fs_reg(negate(brw_vec1_grf(1, 1)))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 3)));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}
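/* The brw_imm_v() constants above are packed vectors of 4-bit values: the
 * nibbles of 0x10101010 add (0,1,0,1,...) to the per-subspan X coordinates
 * and those of 0x11001100 add (0,0,1,1,...) to Y, turning g1's 2x2 subspan
 * origins into per-pixel centers.
 */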
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x));
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y));

   this->current_annotation = "compute 1/pos.w";
   this->wpos_w = fs_reg(brw_vec8_grf(c->key.source_w_reg, 0));
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);

   this->delta_x = fs_reg(brw_vec8_grf(2, 0));
   this->delta_y = fs_reg(brw_vec8_grf(3, 0));

   this->current_annotation = NULL;
}
void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   GLboolean header_present = GL_TRUE;
   int nr = 0;

   if (intel->gen >= 6 &&
       !this->kill_emitted &&
       c->key.nr_color_regions == 1) {
      header_present = false;
   }

   if (header_present) {
      /* m0, m1 header */
      nr += 2;
   }

   if (c->key.aa_dest_stencil_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.aa_dest_stencil_reg, 0))));
   }

   /* Reserve space for color. It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4;

   if (c->key.source_depth_to_render_target) {
      if (c->key.computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
      } else {
         /* Pass through the payload depth. */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                      fs_reg(brw_vec8_grf(c->key.source_depth_reg, 0))));
      }
   }

   if (c->key.dest_depth_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->key.dest_depth_reg, 0))));
   }

   fs_reg color = reg_undef;
   if (this->frag_color)
      color = *(variable_storage(this->frag_color));
   else if (this->frag_data)
      color = *(variable_storage(this->frag_data));

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = talloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      if (this->frag_color || this->frag_data) {
         for (int i = 0; i < 4; i++) {
            emit(fs_inst(BRW_OPCODE_MOV,
                         fs_reg(MRF, color_mrf + i),
                         color));
            color.reg_offset++;
         }
      }

      if (this->frag_color)
         color.reg_offset -= 4;

      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->target = target;
      inst->base_mrf = 0;
      inst->mlen = nr;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->base_mrf = 0;
      inst->mlen = nr;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}
void
fs_visitor::generate_fb_write(fs_inst *inst)
{
   GLboolean eot = inst->eot;
   struct brw_reg implied_header;

   /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
    * move, here's g1.
    */
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);

   if (inst->header_present) {
      if (intel->gen >= 6) {
         brw_MOV(p,
                 brw_message_reg(inst->base_mrf),
                 brw_vec8_grf(0, 0));
         implied_header = brw_null_reg();
      } else {
         implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      }

      brw_MOV(p,
              brw_message_reg(inst->base_mrf + 1),
              brw_vec8_grf(1, 0));
   } else {
      implied_header = brw_null_reg();
   }

   brw_pop_insn_state(p);

   brw_fb_WRITE(p,
                8, /* dispatch_width */
                retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
                inst->base_mrf,
                implied_header,
                inst->target,
                inst->mlen,
                0,
                eot);
}
void
fs_visitor::generate_linterp(fs_inst *inst,
                             struct brw_reg dst, struct brw_reg *src)
{
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = src[1];
   struct brw_reg interp = src[2];

   if (brw->has_pln &&
       delta_y.nr == delta_x.nr + 1 &&
       (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
      brw_PLN(p, dst, interp, delta_x);
   } else {
      brw_LINE(p, brw_null_reg(), interp, delta_x);
      brw_MAC(p, dst, suboffset(interp, 1), delta_y);
   }
}
void
fs_visitor::generate_math(fs_inst *inst,
                          struct brw_reg dst, struct brw_reg *src)
{
   int op;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
      op = BRW_MATH_FUNCTION_INV;
      break;
   case FS_OPCODE_RSQ:
      op = BRW_MATH_FUNCTION_RSQ;
      break;
   case FS_OPCODE_SQRT:
      op = BRW_MATH_FUNCTION_SQRT;
      break;
   case FS_OPCODE_EXP2:
      op = BRW_MATH_FUNCTION_EXP;
      break;
   case FS_OPCODE_LOG2:
      op = BRW_MATH_FUNCTION_LOG;
      break;
   case FS_OPCODE_POW:
      op = BRW_MATH_FUNCTION_POW;
      break;
   case FS_OPCODE_SIN:
      op = BRW_MATH_FUNCTION_SIN;
      break;
   case FS_OPCODE_COS:
      op = BRW_MATH_FUNCTION_COS;
      break;
   default:
      assert(!"not reached: unknown math function");
      return;
   }

   if (intel->gen >= 6) {
      assert(inst->mlen == 0);

      if (inst->opcode == FS_OPCODE_POW) {
         brw_math2(p, dst, op, src[0], src[1]);
      } else {
         brw_math(p, dst,
                  op,
                  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
                  BRW_MATH_SATURATE_NONE,
                  0, src[0],
                  BRW_MATH_DATA_VECTOR,
                  BRW_MATH_PRECISION_FULL);
      }
   } else {
      assert(inst->mlen >= 1);

      brw_math(p, dst,
               op,
               inst->saturate ? BRW_MATH_SATURATE_SATURATE :
               BRW_MATH_SATURATE_NONE,
               inst->base_mrf, src[0],
               BRW_MATH_DATA_VECTOR,
               BRW_MATH_PRECISION_FULL);
   }
}
void
fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst)
{
   int msg_type = -1;
   int rlen = 4;
   uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;

   if (intel->gen >= 5) {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
         }
         break;
      }
   } else {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         /* Note that G45 and older determines shadow compare and dispatch width
          * from message length for most messages.
          */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
         } else {
            assert(inst->mlen <= 4);
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      }
   }
   assert(msg_type != -1);

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
              SURF_INDEX_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              rlen,
              inst->mlen,
              0,
              1,
              simd_mode);
}
/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * and we're trying to produce:
 *
 *           DDX                    DDY
 * dst: (ss0.tr - ss0.tl)    (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)    (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)    (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)    (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)    (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)    (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)    (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)    (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But for DDY, it's harder, as we want to produce the pairs swizzled
 * between each other.  We could probably do it like ddx and swizzle the right
 * order later, but bail for now and just produce
 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
 */
void
fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}

void
fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
void
fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
{
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
   brw_pop_insn_state(p);
}

void
fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
{
   struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
   mask = brw_uw1_reg(mask.file, mask.nr, 0);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_AND(p, g0, mask, g0);
   brw_pop_insn_state(p);
}
void
fs_visitor::assign_curb_setup()
{
   c->prog_data.first_curbe_grf = c->key.nr_payload_regs;
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = brw_reg;
         }
      }
   }
}
void
fs_visitor::calculate_urb_setup()
{
   int urb_next = 0;

   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      urb_setup[i] = -1;
   }

   /* Figure out where each of the incoming setup attributes lands. */
   if (intel->gen >= 6) {
      for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
         if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
         }
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
         if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
            int fp_index;

            if (i >= VERT_RESULT_VAR0)
               fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
            else if (i <= VERT_RESULT_TEX7)
               fp_index = i;
            else
               fp_index = -1;

            if (fp_index >= 0)
               urb_setup[fp_index] = urb_next++;
         }
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   c->prog_data.urb_read_length = urb_next * 2;
}
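/* urb_read_length is counted in full registers: four setup channels at half
 * a reg each means two GRFs per attribute, hence urb_next * 2 above.
 */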
void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;

   /* Offset all the urb_setup[] index by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != FS_OPCODE_LINTERP)
         continue;

      assert(inst->src[2].file == FIXED_HW_REG);

      inst->src[2].fixed_hw_reg.nr += urb_start;
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}
static void
assign_reg(int *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == GRF && reg->reg != 0) {
      assert(reg->reg_offset >= 0);
      reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
   }
}
void
fs_visitor::assign_regs_trivial()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next];
   int i;

   hw_reg_mapping[0] = 0;
   hw_reg_mapping[1] = this->first_non_payload_grf;
   for (i = 2; i < this->virtual_grf_next; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1]);
   }
   last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}
2249 fs_visitor::assign_regs()
2252 int hw_reg_mapping
[this->virtual_grf_next
+ 1];
2253 int base_reg_count
= BRW_MAX_GRF
- this->first_non_payload_grf
;
2254 int class_sizes
[base_reg_count
];
2255 int class_count
= 0;
2256 int aligned_pair_class
= -1;
2258 /* Set up the register classes.
2260 * The base registers store a scalar value. For texture samples,
2261 * we get virtual GRFs composed of 4 contiguous hw register. For
2262 * structures and arrays, we store them as contiguous larger things
2263 * than that, though we should be able to do better most of the
2266 class_sizes
[class_count
++] = 1;
2267 if (brw
->has_pln
&& intel
->gen
< 6) {
2268 /* Always set up the (unaligned) pairs for gen5, so we can find
2269 * them for making the aligned pair class.
2271 class_sizes
[class_count
++] = 2;
2273 for (int r
= 1; r
< this->virtual_grf_next
; r
++) {
2276 for (i
= 0; i
< class_count
; i
++) {
2277 if (class_sizes
[i
] == this->virtual_grf_sizes
[r
])
2280 if (i
== class_count
) {
2281 if (this->virtual_grf_sizes
[r
] >= base_reg_count
) {
2282 fprintf(stderr
, "Object too large to register allocate.\n");
2286 class_sizes
[class_count
++] = this->virtual_grf_sizes
[r
];
2290 int ra_reg_count
= 0;
2291 int class_base_reg
[class_count
];
2292 int class_reg_count
[class_count
];
2293 int classes
[class_count
+ 1];
2295 for (int i
= 0; i
< class_count
; i
++) {
2296 class_base_reg
[i
] = ra_reg_count
;
2297 class_reg_count
[i
] = base_reg_count
- (class_sizes
[i
] - 1);
2298 ra_reg_count
+= class_reg_count
[i
];
2301 struct ra_regs
*regs
= ra_alloc_reg_set(ra_reg_count
);
2302 for (int i
= 0; i
< class_count
; i
++) {
2303 classes
[i
] = ra_alloc_reg_class(regs
);
2305 for (int i_r
= 0; i_r
< class_reg_count
[i
]; i_r
++) {
2306 ra_class_add_reg(regs
, classes
[i
], class_base_reg
[i
] + i_r
);
2309 /* Add conflicts between our contiguous registers aliasing
2310 * base regs and other register classes' contiguous registers
2311 * that alias base regs, or the base regs themselves for classes[0].
2313 for (int c
= 0; c
<= i
; c
++) {
2314 for (int i_r
= 0; i_r
< class_reg_count
[i
]; i_r
++) {
2315 for (int c_r
= MAX2(0, i_r
- (class_sizes
[c
] - 1));
2316 c_r
< MIN2(class_reg_count
[c
], i_r
+ class_sizes
[i
]);
2320 printf("%d/%d conflicts %d/%d\n",
2321 class_sizes
[i
], this->first_non_payload_grf
+ i_r
,
2322 class_sizes
[c
], this->first_non_payload_grf
+ c_r
);
2325 ra_add_reg_conflict(regs
,
2326 class_base_reg
[i
] + i_r
,
2327 class_base_reg
[c
] + c_r
);
2333 /* Add a special class for aligned pairs, which we'll put delta_x/y
2334 * in on gen5 so that we can do PLN.
2336 if (brw
->has_pln
&& intel
->gen
< 6) {
2337 int reg_count
= (base_reg_count
- 1) / 2;
2338 int unaligned_pair_class
= 1;
2339 assert(class_sizes
[unaligned_pair_class
] == 2);
2341 aligned_pair_class
= class_count
;
2342 classes
[aligned_pair_class
] = ra_alloc_reg_class(regs
);
2343 class_sizes
[aligned_pair_class
] = 2;
2344 class_base_reg
[aligned_pair_class
] = 0;
2345 class_reg_count
[aligned_pair_class
] = 0;
2346 int start
= (this->first_non_payload_grf
& 1) ? 1 : 0;
2348 for (int i
= 0; i
< reg_count
; i
++) {
2349 ra_class_add_reg(regs
, classes
[aligned_pair_class
],
2350 class_base_reg
[unaligned_pair_class
] + i
* 2 + start
);
2355 ra_set_finalize(regs
);
   struct ra_graph *g = ra_alloc_interference_graph(regs,
                                                    this->virtual_grf_next);
   /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
    * with nodes.
    */
   ra_set_node_class(g, 0, classes[0]);

   for (int i = 1; i < this->virtual_grf_next; i++) {
      for (int c = 0; c < class_count; c++) {
         if (class_sizes[c] == this->virtual_grf_sizes[i]) {
            if (aligned_pair_class >= 0 &&
                this->delta_x.reg == i) {
               ra_set_node_class(g, i, classes[aligned_pair_class]);
            } else {
               ra_set_node_class(g, i, classes[c]);
            }
            break;
         }
      }

      for (int j = 1; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   /* FINISHME: Handle spilling */
   if (!ra_allocate_no_spills(g)) {
      fprintf(stderr, "Failed to allocate registers.\n");
      this->fail = true;
      return;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   hw_reg_mapping[0] = 0; /* unused */
   for (int i = 1; i < this->virtual_grf_next; i++) {
      int reg = ra_get_node_reg(g, i);
      int hw_reg = -1;

      for (int c = 0; c < class_count; c++) {
         if (reg >= class_base_reg[c] &&
             reg < class_base_reg[c] + class_reg_count[c]) {
            hw_reg = reg - class_base_reg[c];
            break;
         }
      }

      assert(hw_reg >= 0);
      hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
      last_grf = MAX2(last_grf,
                      hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}
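
/* Mapping back is just the inverse of the class numbering above: a
 * chosen ra register in class c sits at class_base_reg[c] + hw_reg, so
 * subtracting the class base recovers the hardware GRF offset.  E.g.
 * with the illustrative numbers from the earlier sketch, ra register 9
 * (class 1) becomes hw_reg 1, the pair occupying base regs 1 and 2.
 */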
/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This is mostly duplicated with what brw_fs_vector_splitting does,
 * but that's really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization.  We can do it once here, safely.  This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs.  If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
void
fs_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_next;
   bool split_grf[num_vars];
   int new_virtual_grf[num_vars];

   /* Try to split anything > 1 sized. */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] != 1)
         split_grf[i] = true;
      else
         split_grf[i] = false;
   }

   if (brw->has_pln) {
      /* PLN opcodes rely on the delta_xy being contiguous. */
      split_grf[this->delta_x.reg] = false;
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      /* Texturing produces 4 contiguous registers, so no splitting. */
      if ((inst->opcode == FS_OPCODE_TEX ||
           inst->opcode == FS_OPCODE_TXB ||
           inst->opcode == FS_OPCODE_TXL) &&
          inst->dst.file == GRF) {
         split_grf[inst->dst.reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (split_grf[i]) {
         new_virtual_grf[i] = virtual_grf_alloc(1);
         for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
            int reg = virtual_grf_alloc(1);
            assert(reg == new_virtual_grf[i] + j - 1);
         }
         this->virtual_grf_sizes[i] = 1;
      }
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF &&
          split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
}
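
/* Example of the remapping above (register numbers are illustrative):
 * a size-3 vgrf5 keeps vgrf5 for reg_offset 0, and virtual_grf_alloc(1)
 * hands out, say, vgrf12 and vgrf13 for offsets 1 and 2.  An access to
 * vgrf5 at reg_offset 2 is then rewritten to vgrf13 at reg_offset 0,
 * giving each component its own live interval.
 */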
void
fs_visitor::calculate_live_intervals()
{
   int num_vars = this->virtual_grf_next;
   int *def = talloc_array(mem_ctx, int, num_vars);
   int *use = talloc_array(mem_ctx, int, num_vars);
   int loop_depth = 0;
   int loop_start = 0;

   for (int i = 0; i < num_vars; i++) {
      def[i] = 1 << 30;
      use[i] = -1;
   }

   int eip = 0;
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode == BRW_OPCODE_DO) {
         if (loop_depth++ == 0)
            loop_start = eip;
      } else if (inst->opcode == BRW_OPCODE_WHILE) {
         loop_depth--;

         if (loop_depth == 0) {
            /* FINISHME:
             *
             * Patches up any vars marked for use within the loop as
             * live until the end.  This is conservative, as there
             * will often be variables defined and used inside the
             * loop but dead at the end of the loop body.
             */
            for (int i = 0; i < num_vars; i++) {
               if (use[i] == loop_start) {
                  use[i] = eip;
               }
            }
         }
      } else {
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
               use[inst->src[i].reg] = MAX2(use[inst->src[i].reg], eip);
            }
         }
         if (inst->dst.file == GRF && inst->dst.reg != 0) {
            def[inst->dst.reg] = MIN2(def[inst->dst.reg], eip);
         }
      }

      eip++;
   }

   talloc_free(this->virtual_grf_def);
   talloc_free(this->virtual_grf_use);
   this->virtual_grf_def = def;
   this->virtual_grf_use = use;
}
/**
 * Attempts to move immediate constants into the immediate
 * constant slot of following instructions.
 *
 * Immediate constants are a bit tricky -- they have to be in the last
 * operand slot, and you can't do abs/negate on them.
 */
bool
fs_visitor::propagate_constants()
{
   bool progress = false;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != GRF || inst->src[0].file != IMM ||
          inst->dst.type != inst->src[0].type)
         continue;

      /* Don't bother with cases where we should have had the
       * operation on the constant folded in GLSL already.
       */
      if (inst->saturate)
         continue;

      /* Found a move of a constant to a GRF.  Find anything else using the GRF
       * before it's written, and replace it with the constant if we can.
       */
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         for (int i = 2; i >= 0; i--) {
            if (scan_inst->src[i].file != GRF ||
                scan_inst->src[i].reg != inst->dst.reg ||
                scan_inst->src[i].reg_offset != inst->dst.reg_offset)
               continue;

            /* Don't bother with cases where we should have had the
             * operation on the constant folded in GLSL already.
             */
            if (scan_inst->src[i].negate || scan_inst->src[i].abs)
               continue;

            switch (scan_inst->opcode) {
            case BRW_OPCODE_MOV:
               scan_inst->src[i] = inst->src[0];
               progress = true;
               break;

            case BRW_OPCODE_MUL:
            case BRW_OPCODE_ADD:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  /* Fit this constant in by commuting the operands */
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];
                  progress = true;
               }
               break;
            case BRW_OPCODE_CMP:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               }
               break;
            }
         }

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->dst.reg &&
             (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
              scan_inst->opcode == FS_OPCODE_TEX)) {
            break;
         }
      }
   }

   return progress;
}
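
/* Example of the rewrite above (registers illustrative):
 *
 *    MOV vgrf4, 2.0f
 *    MUL vgrf6, vgrf5, vgrf4      becomes    MUL vgrf6, vgrf5, 2.0f
 *    MUL vgrf7, vgrf4, vgrf5      becomes    MUL vgrf7, vgrf5, 2.0f
 *
 * The second MUL only works because MUL commutes: the immediate has to
 * live in the last operand slot, so src[0] and src[1] are swapped.
 * CMP is only handled for src[1], since swapping its operands would
 * also require inverting the conditional mod.
 */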
/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something def'd but not used won't be considered to
 * interfere with other regs.
 */
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;
   int num_vars = this->virtual_grf_next;
   bool dead[num_vars];

   for (int i = 0; i < num_vars; i++) {
      dead[i] = this->virtual_grf_def[i] >= this->virtual_grf_use[i];

      if (dead[i]) {
         /* Mark off its interval so it won't interfere with anything. */
         this->virtual_grf_def[i] = -1;
         this->virtual_grf_use[i] = -1;
      }
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF && dead[inst->dst.reg]) {
         inst->remove();
         progress = true;
      }
   }

   return progress;
}
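
/* With use[] initialized to -1 and def[] to a sentinel larger than any
 * instruction number, a vgrf that is only ever written satisfies
 * def >= use and gets removed here.  For instance, a MOV whose result
 * was replaced everywhere by propagate_constants() becomes dead on the
 * next pass through this loop.
 */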
bool
fs_visitor::register_coalesce()
{
   bool progress = false;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->saturate ||
          inst->dst.file != GRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type)
         continue;

      /* Found a move of a GRF to a GRF.  Let's see if we can coalesce
       * them: check for no writes to either one until the exit of the
       * program.
       */
      bool interfered = false;
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            interfered = true;
            break;
         }

         if (scan_inst->dst.file == GRF) {
            if (scan_inst->dst.reg == inst->dst.reg &&
                (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
                 scan_inst->opcode == FS_OPCODE_TEX)) {
               interfered = true;
               break;
            }
            if (scan_inst->dst.reg == inst->src[0].reg &&
                (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
                 scan_inst->opcode == FS_OPCODE_TEX)) {
               interfered = true;
               break;
            }
         }
      }
      if (interfered)
         continue;

      /* Update live interval so we don't have to recalculate. */
      this->virtual_grf_use[inst->src[0].reg] = MAX2(virtual_grf_use[inst->src[0].reg],
                                                     virtual_grf_use[inst->dst.reg]);

      /* Rewrite the later usage to point at the source of the move to
       * be removed.
       */
      for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
           scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->dst.reg &&
                scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
               scan_inst->src[i].reg = inst->src[0].reg;
               scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
               scan_inst->src[i].abs |= inst->src[0].abs;
               scan_inst->src[i].negate ^= inst->src[0].negate;
            }
         }
      }

      inst->remove();
      progress = true;
   }

   return progress;
}
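
/* Example of coalescing (registers illustrative): for
 *
 *    MOV vgrf6, (abs)vgrf5
 *    ADD vgrf8, vgrf6, vgrf7
 *
 * if neither vgrf5 nor vgrf6 is written again, the ADD is rewritten to
 * read (abs)vgrf5 directly (abs is ORed in, negate is XORed in) and
 * the MOV is removed.
 */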
bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate)
         continue;

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      fs_inst *scan_inst;
      for (scan_inst = (fs_inst *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (fs_inst *)scan_inst->prev) {
         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF &&
             scan_inst->dst.hw_reg == inst->dst.hw_reg) {
            /* Somebody else wrote our MRF here, so we can't
             * compute-to-MRF before that.
             */
            break;
         }

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which will do some amount of
             * implied write that may overwrite our MRF that we were
             * hoping to compute-to-MRF somewhere above it.  Nothing
             * we emit implied-writes more than 2 MRFs from base_mrf,
             * though.
             */
            int implied_write_len = MIN2(scan_inst->mlen, 2);
            if (inst->dst.hw_reg >= scan_inst->base_mrf &&
                inst->dst.hw_reg < scan_inst->base_mrf + implied_write_len) {
               break;
            }
         }

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            if (scan_inst->opcode == FS_OPCODE_TEX) {
               /* texturing writes several contiguous regs, so we can't
                * compute-to-mrf that.
                */
               break;
            }

            /* If it's predicated, it (probably) didn't populate all
             * the channels.
             */
            if (scan_inst->predicated)
               break;

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->opcode == FS_OPCODE_RCP ||
                   scan_inst->opcode == FS_OPCODE_RSQ ||
                   scan_inst->opcode == FS_OPCODE_SQRT ||
                   scan_inst->opcode == FS_OPCODE_EXP2 ||
                   scan_inst->opcode == FS_OPCODE_LOG2 ||
                   scan_inst->opcode == FS_OPCODE_SIN ||
                   scan_inst->opcode == FS_OPCODE_COS ||
                   scan_inst->opcode == FS_OPCODE_POW) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value. */
               scan_inst->dst.file = MRF;
               scan_inst->dst.hw_reg = inst->dst.hw_reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove();
               progress = true;
            }
            break;
         }
      }
   }

   return progress;
}
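
/* Example of compute-to-MRF (registers illustrative): for
 *
 *    ADD vgrf7, vgrf5, vgrf6
 *    MOV m4, vgrf7
 *
 * if vgrf7 is never read after the MOV, and nothing between the two
 * instructions reads vgrf7 or writes m4, the ADD is rewritten to write
 * m4 directly and the MOV is removed.
 */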
bool
fs_visitor::virtual_grf_interferes(int a, int b)
{
   int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
   int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);

   /* For dead code, just check if the def interferes with the other range. */
   if (this->virtual_grf_use[a] == -1) {
      return (this->virtual_grf_def[a] >= this->virtual_grf_def[b] &&
              this->virtual_grf_def[a] < this->virtual_grf_use[b]);
   }
   if (this->virtual_grf_use[b] == -1) {
      return (this->virtual_grf_def[b] >= this->virtual_grf_def[a] &&
              this->virtual_grf_def[b] < this->virtual_grf_use[a]);
   }

   return start < end;
}
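
/* E.g. (instruction numbers illustrative) intervals [2, 10] and
 * [8, 12] interfere since MAX2(2, 8) == 8 < 10 == MIN2(10, 12), while
 * [2, 6] and [8, 12] do not.  The use == -1 cases above keep a dead
 * write interfering with any range that spans its def, so its
 * destination still gets a register of its own.
 */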
static struct brw_reg
brw_reg_from_fs_reg(fs_reg *reg)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case GRF:
   case ARF:
   case MRF:
      brw_reg = brw_vec8_reg(reg->file,
                             reg->hw_reg, 0);
      brw_reg = retype(brw_reg, reg->type);
      break;
   case IMM:
      switch (reg->type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(reg->imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(reg->imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(reg->imm.u);
         break;
      default:
         assert(!"not reached");
         break;
      }
      break;
   case FIXED_HW_REG:
      brw_reg = reg->fixed_hw_reg;
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   if (reg->abs)
      brw_reg = brw_abs(brw_reg);
   if (reg->negate)
      brw_reg = negate(brw_reg);

   return brw_reg;
}
void
fs_visitor::generate_code()
{
   unsigned int annotation_len = 0;
   int last_native_inst = 0;
   struct brw_instruction *if_stack[16], *loop_stack[16];
   int if_stack_depth = 0, loop_stack_depth = 0;
   int if_depth_in_loop[16];

   if_depth_in_loop[loop_stack_depth] = 0;

   memset(&if_stack, 0, sizeof(if_stack));
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();
      struct brw_reg src[3], dst;

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = brw_reg_from_fs_reg(&inst->src[i]);
      }
      dst = brw_reg_from_fs_reg(&inst->dst);

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicated);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         assert(if_stack_depth < 16);
         if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_stack_depth]++;
         if_stack_depth++;
         break;
      case BRW_OPCODE_ELSE:
         if_stack[if_stack_depth - 1] =
            brw_ELSE(p, if_stack[if_stack_depth - 1]);
         break;
      case BRW_OPCODE_ENDIF:
         if_stack_depth--;
         brw_ENDIF(p, if_stack[if_stack_depth]);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         /* patch all the BREAK/CONT instructions from last BGNLOOP */
         while (inst0 > loop_stack[loop_stack_depth]) {
            inst0--;
            if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                inst0->bits3.if_else.jump_count == 0) {
               inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
            }
            else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                     inst0->bits3.if_else.jump_count == 0) {
               inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
            }
         }
      }
         break;

      case FS_OPCODE_RCP:
      case FS_OPCODE_RSQ:
      case FS_OPCODE_SQRT:
      case FS_OPCODE_EXP2:
      case FS_OPCODE_LOG2:
      case FS_OPCODE_SIN:
      case FS_OPCODE_COS:
      case FS_OPCODE_POW:
         generate_math(inst, dst, src);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case FS_OPCODE_TXL:
         generate_tex(inst, dst);
         break;
      case FS_OPCODE_DISCARD_NOT:
         generate_discard_not(inst, dst);
         break;
      case FS_OPCODE_DISCARD_AND:
         generate_discard_and(inst, src[0]);
         break;
      case FS_OPCODE_DDX:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
      default:
         if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
            _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
                          brw_opcodes[inst->opcode].name);
         } else {
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         this->fail = true;
      }

      if (annotation_len < p->nr_insn) {
         annotation_len *= 2;
         if (annotation_len < 16)
            annotation_len = 16;

         this->annotation_string = talloc_realloc(this->mem_ctx,
                                                  annotation_string,
                                                  const char *,
                                                  annotation_len);
         this->annotation_ir = talloc_realloc(this->mem_ctx,
                                              annotation_ir,
                                              ir_instruction *,
                                              annotation_len);
      }

      for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
         this->annotation_string[i] = inst->annotation;
         this->annotation_ir[i] = inst->ir;
      }
      last_native_inst = p->nr_insn;
   }
}
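
/* A note on the BREAK/CONT patching above: brw_BREAK and brw_CONT are
 * emitted with jump_count == 0, since only the WHILE knows the final
 * distance.  A BREAK exits past the WHILE, hence (inst1 - inst0 + 1);
 * a CONT re-executes the WHILE itself, hence (inst1 - inst0).  br is
 * the per-instruction jump scale: on gen5 and later the hardware
 * counts jumps in 64-bit units, two per 128-bit instruction, so the
 * counts are doubled there.
 */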
GLboolean
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_shader_program *prog = ctx->Shader.CurrentProgram;

   if (!prog)
      return GL_FALSE;

   struct brw_shader *shader =
     (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (!shader)
      return GL_FALSE;

   /* We always use 8-wide mode, at least for now.  For one, flow
    * control only works in 8-wide.  Also, when we're fragment shader
    * bound, we're almost always under register pressure as well, so
    * 8-wide would save us from the performance cliff of spilling
    * regs.
    */
   c->dispatch_width = 8;

   if (INTEL_DEBUG & DEBUG_WM) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   fs_visitor v(c, shader);

   if (0) {
      v.emit_dummy_fs();
   } else {
      v.calculate_urb_setup();
      if (intel->gen < 6)
         v.emit_interpolation_setup_gen4();
      else
         v.emit_interpolation_setup_gen6();

      /* Generate FS IR for main().  (the visitor only descends into
       * functions called "main").
       */
      foreach_iter(exec_list_iterator, iter, *shader->ir) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         v.base_ir = ir;
         ir->accept(&v);
      }

      v.emit_fb_writes();
      v.split_virtual_grfs();

      v.assign_curb_setup();
      v.assign_urb_setup();

      bool progress;
      do {
         progress = false;

         v.calculate_live_intervals();
         progress = v.propagate_constants() || progress;
         progress = v.register_coalesce() || progress;
         progress = v.compute_to_mrf() || progress;
         progress = v.dead_code_eliminate() || progress;
      } while (progress);

      if (0)
         v.assign_regs_trivial();
      else
         v.assign_regs();
   }

   if (!v.fail)
      v.generate_code();

   assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */

   if (v.fail)
      return GL_FALSE;

   if (INTEL_DEBUG & DEBUG_WM) {
      const char *last_annotation_string = NULL;
      ir_instruction *last_annotation_ir = NULL;

      printf("Native code for fragment shader %d:\n", prog->Name);
      for (unsigned int i = 0; i < p->nr_insn; i++) {
         if (last_annotation_ir != v.annotation_ir[i]) {
            last_annotation_ir = v.annotation_ir[i];
            if (last_annotation_ir) {
               printf("   ");
               last_annotation_ir->print();
               printf("\n");
            }
         }
         if (last_annotation_string != v.annotation_string[i]) {
            last_annotation_string = v.annotation_string[i];
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
         brw_disasm(stdout, &p->store[i], intel->gen);
         printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
                ((uint32_t *)&p->store[i])[3],
                ((uint32_t *)&p->store[i])[2],
                ((uint32_t *)&p->store[i])[1],
                ((uint32_t *)&p->store[i])[0]);
      }
      printf("\n");
   }

   c->prog_data.total_grf = v.grf_used;
   c->prog_data.total_scratch = 0;

   return GL_TRUE;
}