/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"

#define MAX_INSTRUCTION (1 << 30)
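/* Note: MAX_INSTRUCTION serves as an effectively-infinite instruction index
 * sentinel further down in this backend (e.g. when computing live ranges).
 */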
static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;

   prog = rzalloc(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }

   return &prog->base;
}
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;

   struct brw_shader *shader =
      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (shader != NULL) {
      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      do_mat_op_to_vec(shader->ir);
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);
      lower_variable_index_to_cond_assign(shader->ir,
                                          GL_TRUE, /* input */
                                          GL_TRUE, /* output */
                                          GL_TRUE, /* temp */
                                          GL_TRUE /* uniform */
                                          );

      do {
         progress = false;

         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, 32) || progress;
      } while (progress);

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}
static int
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}
void
fs_visitor::fail(const char *format, ...)
{
   if (INTEL_DEBUG & DEBUG_WM) {
      va_list va;

      fprintf(stderr, "FS compile failed: ");

      va_start(va, format);
      vfprintf(stderr, format, va);
      va_end(va);
   }
}
void
fs_visitor::push_force_uncompressed()
{
   force_uncompressed_stack++;
}

void
fs_visitor::pop_force_uncompressed()
{
   force_uncompressed_stack--;
   assert(force_uncompressed_stack >= 0);
}

void
fs_visitor::push_force_sechalf()
{
   force_sechalf_stack++;
}

void
fs_visitor::pop_force_sechalf()
{
   force_sechalf_stack--;
   assert(force_sechalf_stack >= 0);
}
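/* These push/pop counters are consulted when instructions are emitted: while
 * a counter is non-zero, newly emitted instructions are flagged to run
 * uncompressed or on the second half of a 16-wide dispatch, respectively.
 */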
/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      return 1 * c->dispatch_width / 8;
   case FS_OPCODE_POW:
      return 2 * c->dispatch_width / 8;
   case FS_OPCODE_TEX:
   case FS_OPCODE_TXB:
   case FS_OPCODE_TXD:
   case FS_OPCODE_TXL:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNSPILL:
      return 1;
   case FS_OPCODE_SPILL:
      return 2;
   default:
      assert(!"not reached");
      return 0;
   }
}
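/* Virtual GRFs below are handed out starting from index 1; slot 0 of
 * virtual_grf_sizes is kept unused so that a reg number of 0 can mean
 * "no register allocated".
 */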
int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);

      /* This slot is always unused. */
      virtual_grf_sizes[0] = 0;
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = BRW_REGISTER_TYPE_F;
}

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = type;
}
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}
/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}

fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}
static void
import_uniforms_callback(const void *key,
                         void *data,
                         void *closure)
{
   struct hash_table *dst_ht = (struct hash_table *)closure;
   const fs_reg *reg = (const fs_reg *)data;

   if (reg->file != UNIFORM)
      return;

   hash_table_insert(dst_ht, data, key);
}

/* For 16-wide, we need to follow from the uniform setup of 8-wide dispatch.
 * This brings in those uniform definitions.
 */
void
fs_visitor::import_uniforms(struct hash_table *src_variable_ht)
{
   hash_table_call_foreach(src_variable_ht,
                           import_uniforms_callback,
                           variable_ht);
}
/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }
      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         unsigned int param = c->prog_data.nr_params++;

         assert(param < ARRAY_SIZE(c->prog_data.param));

         switch (type->base_type) {
         case GLSL_TYPE_FLOAT:
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         case GLSL_TYPE_UINT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
            break;
         case GLSL_TYPE_INT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
            break;
         case GLSL_TYPE_BOOL:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
            break;
         default:
            assert(!"not reached");
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         }
         this->param_index[param] = loc;
         this->param_offset[param] = i;
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}
/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         c->prog_data.param_convert[c->prog_data.nr_params] =
            PARAM_NO_CONVERT;
         this->param_index[c->prog_data.nr_params] = index;
         this->param_offset[c->prog_data.nr_params] = swiz;
         c->prog_data.nr_params++;
      }
   }
}
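/* gl_FragCoord setup: the X/Y handling below honors the shader's
 * origin/pixel-center layout qualifiers, flipping Y when the FBO orientation
 * and the requested origin disagree and adding the 0.5 pixel-center offset
 * when integer pixel centers were not requested.
 */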
fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   fs_reg neg_y = this->pixel_y;
   neg_y.negate = true;
   bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_x);
   } else {
      emit(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (!flip && ir->pixel_center_integer) {
      emit(BRW_OPCODE_MOV, wpos, this->pixel_y);
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_MOV, wpos,
           fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
   } else {
      emit(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
           interp_reg(FRAG_ATTRIB_WPOS, 2));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(BRW_OPCODE_MOV, wpos, this->wpos_w);

   return reg;
}
fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         fail("dereferenced array '%s' has length 0\n", ir->name);
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         bool is_gl_Color =
            location == FRAG_ATTRIB_COL0 || location == FRAG_ATTRIB_COL1;

         if (c->key.flat_shade && is_gl_Color) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
               emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr.reg_offset++;
            }
         } else {
            /* Perspective interpolation case. */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               emit(FS_OPCODE_LINTERP, attr,
                    this->delta_x, this->delta_y, fs_reg(interp));
               attr.reg_offset++;
            }

            if (intel->gen < 6 && !(is_gl_Color && c->key.linear_color)) {
               attr.reg_offset -= type->vector_elements;
               for (unsigned int k = 0; k < type->vector_elements; k++) {
                  emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
                  attr.reg_offset++;
               }
            }
         }
         location++;
      }
   }

   return reg;
}
fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(BRW_OPCODE_ASR, *reg,
           fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
           fs_reg(15));
      emit(BRW_OPCODE_NOT, *reg, *reg);
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front facing.
       */
      fs_inst *inst = emit(BRW_OPCODE_CMP, *reg,
                           fs_reg(r1_6ud),
                           fs_reg(1u << 31));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u));
   }

   return reg;
}
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
{
   switch (opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out. We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    *
    * The hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
   if (intel->gen >= 6 && (src.file == UNIFORM ||
                           src.abs ||
                           src.negate)) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(BRW_OPCODE_MOV, expanded, src);
      src = expanded;
   }

   fs_inst *inst = emit(opcode, dst, src);

   if (intel->gen < 6) {
      inst->base_mrf = base_mrf;
      inst->mlen = c->dispatch_width / 8;
   }

   return inst;
}
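/* The two-operand variant below only handles POW.  On pre-gen6 hardware the
 * second operand has to be loaded into the next MRF before the send to the
 * math function unit, while gen6+ takes both operands directly.
 */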
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   fs_inst *inst;

   assert(opcode == FS_OPCODE_POW);

   if (intel->gen >= 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out.
       *
       * The hardware ignores source modifiers (negate and abs) on math
       * instructions, so we also move to a temp to set those up.
       */
      if (src0.file == UNIFORM || src0.abs || src0.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(BRW_OPCODE_MOV, expanded, src0);
         src0 = expanded;
      }

      if (src1.file == UNIFORM || src1.abs || src1.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(BRW_OPCODE_MOV, expanded, src1);
         src1 = expanded;
      }

      inst = emit(opcode, dst, src0, src1);
   } else {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1);
      inst = emit(opcode, dst, src0, reg_null_f);

      inst->base_mrf = base_mrf;
      inst->mlen = 2 * c->dispatch_width / 8;
   }

   return inst;
}
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   }

   if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
         }
         return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}
void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}
void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM ||
             (this->result.file == GRF &&
              this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}
/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   sat_val->accept(this);
   fs_reg src = this->result;

   this->result = fs_reg(this, ir->type);
   fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
   inst->saturate = true;

   return true;
}
static uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}
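/* The conditional-mod codes returned above are used below both for CMP
 * instructions that implement IR comparisons and for predicating IF/BREAK
 * when emitting control flow.
 */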
void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to
       * scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;
      break;
   case ir_unop_rcp:
      emit_math(FS_OPCODE_RCP, this->result, op[0]);
      break;
   case ir_unop_exp2:
      emit_math(FS_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(FS_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(FS_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(FS_OPCODE_COS, this->result, op[0]);
      break;
   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;
   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;
   case ir_binop_mul:
      emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      break;
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;
   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1));
      break;
   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;
   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_unop_any:
   case ir_binop_dot:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;
   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;
   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   case ir_unop_sqrt:
      emit_math(FS_OPCODE_SQRT, this->result, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(FS_OPCODE_RSQ, this->result, op[0]);
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1));
      break;
   case ir_unop_trunc:
      emit(BRW_OPCODE_RNDZ, this->result, op[0]);
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      break;
   case ir_unop_fract:
      inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
      break;
   case ir_unop_round_even:
      emit(BRW_OPCODE_RNDE, this->result, op[0]);
      break;
   case ir_binop_min:
      inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
      inst->conditional_mod = BRW_CONDITIONAL_L;

      inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
      inst->predicated = true;
      break;
   case ir_binop_max:
      inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
      inst->conditional_mod = BRW_CONDITIONAL_G;

      inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
      inst->predicated = true;
      break;
   case ir_binop_pow:
      emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
      break;
   case ir_unop_bit_not:
      inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
      break;
   case ir_binop_bit_and:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_xor:
      inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_or:
      inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;
   case ir_binop_lshift:
   case ir_binop_rshift:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
}
1099 fs_visitor::emit_assignment_writes(fs_reg
&l
, fs_reg
&r
,
1100 const glsl_type
*type
, bool predicated
)
1102 switch (type
->base_type
) {
1103 case GLSL_TYPE_FLOAT
:
1104 case GLSL_TYPE_UINT
:
1106 case GLSL_TYPE_BOOL
:
1107 for (unsigned int i
= 0; i
< type
->components(); i
++) {
1108 l
.type
= brw_type_for_base_type(type
);
1109 r
.type
= brw_type_for_base_type(type
);
1111 fs_inst
*inst
= emit(BRW_OPCODE_MOV
, l
, r
);
1112 inst
->predicated
= predicated
;
1118 case GLSL_TYPE_ARRAY
:
1119 for (unsigned int i
= 0; i
< type
->length
; i
++) {
1120 emit_assignment_writes(l
, r
, type
->fields
.array
, predicated
);
1124 case GLSL_TYPE_STRUCT
:
1125 for (unsigned int i
= 0; i
< type
->length
; i
++) {
1126 emit_assignment_writes(l
, r
, type
->fields
.structure
[i
].type
,
1131 case GLSL_TYPE_SAMPLER
:
1135 assert(!"not reached");
1141 fs_visitor::visit(ir_assignment
*ir
)
1146 /* FINISHME: arrays on the lhs */
1147 ir
->lhs
->accept(this);
1150 ir
->rhs
->accept(this);
1153 assert(l
.file
!= BAD_FILE
);
1154 assert(r
.file
!= BAD_FILE
);
1156 if (ir
->condition
) {
1157 emit_bool_to_cond_code(ir
->condition
);
1160 if (ir
->lhs
->type
->is_scalar() ||
1161 ir
->lhs
->type
->is_vector()) {
1162 for (int i
= 0; i
< ir
->lhs
->type
->vector_elements
; i
++) {
1163 if (ir
->write_mask
& (1 << i
)) {
1164 inst
= emit(BRW_OPCODE_MOV
, l
, r
);
1166 inst
->predicated
= true;
1172 emit_assignment_writes(l
, r
, ir
->lhs
->type
, ir
->condition
!= NULL
);
1177 fs_visitor::emit_texture_gen4(ir_texture
*ir
, fs_reg dst
, fs_reg coordinate
)
1181 bool simd16
= false;
1187 if (ir
->shadow_comparitor
) {
1188 for (int i
= 0; i
< ir
->coordinate
->type
->vector_elements
; i
++) {
1189 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
+ i
), coordinate
);
1190 coordinate
.reg_offset
++;
1192 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1195 if (ir
->op
== ir_tex
) {
1196 /* There's no plain shadow compare message, so we use shadow
1197 * compare with a bias of 0.0.
1199 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), fs_reg(0.0f
));
1201 } else if (ir
->op
== ir_txb
) {
1202 ir
->lod_info
.bias
->accept(this);
1203 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1206 assert(ir
->op
== ir_txl
);
1207 ir
->lod_info
.lod
->accept(this);
1208 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1212 ir
->shadow_comparitor
->accept(this);
1213 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1215 } else if (ir
->op
== ir_tex
) {
1216 for (int i
= 0; i
< ir
->coordinate
->type
->vector_elements
; i
++) {
1217 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
+ i
), coordinate
);
1218 coordinate
.reg_offset
++;
1220 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
1222 } else if (ir
->op
== ir_txd
) {
1223 assert(!"TXD isn't supported on gen4 yet.");
1225 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod
1226 * instructions. We'll need to do SIMD16 here.
1228 assert(ir
->op
== ir_txb
|| ir
->op
== ir_txl
);
1230 for (int i
= 0; i
< ir
->coordinate
->type
->vector_elements
; i
++) {
1231 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
+ i
* 2), coordinate
);
1232 coordinate
.reg_offset
++;
1235 /* lod/bias appears after u/v/r. */
1238 if (ir
->op
== ir_txb
) {
1239 ir
->lod_info
.bias
->accept(this);
1240 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1243 ir
->lod_info
.lod
->accept(this);
1244 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1248 /* The unused upper half. */
1251 /* Now, since we're doing simd16, the return is 2 interleaved
1252 * vec4s where the odd-indexed ones are junk. We'll need to move
1253 * this weirdness around to the expected layout.
1257 dst
= fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type
,
1259 dst
.type
= BRW_REGISTER_TYPE_F
;
1262 fs_inst
*inst
= NULL
;
1265 inst
= emit(FS_OPCODE_TEX
, dst
);
1268 inst
= emit(FS_OPCODE_TXB
, dst
);
1271 inst
= emit(FS_OPCODE_TXL
, dst
);
1274 inst
= emit(FS_OPCODE_TXD
, dst
);
1277 assert(!"GLSL 1.30 features unsupported");
1280 inst
->base_mrf
= base_mrf
;
1284 for (int i
= 0; i
< 4; i
++) {
1285 emit(BRW_OPCODE_MOV
, orig_dst
, dst
);
1286 orig_dst
.reg_offset
++;
1287 dst
.reg_offset
+= 2;
1294 /* gen5's sampler has slots for u, v, r, array index, then optional
1295 * parameters like shadow comparitor or LOD bias. If optional
1296 * parameters aren't present, those base slots are optional and don't
1297 * need to be included in the message.
1299 * We don't fill in the unnecessary slots regardless, which may look
1300 * surprising in the disassembly.
1303 fs_visitor::emit_texture_gen5(ir_texture
*ir
, fs_reg dst
, fs_reg coordinate
)
1305 int mlen
= 1; /* g0 header always present. */
1307 int reg_width
= c
->dispatch_width
/ 8;
1309 for (int i
= 0; i
< ir
->coordinate
->type
->vector_elements
; i
++) {
1310 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
+ i
* reg_width
),
1312 coordinate
.reg_offset
++;
1314 mlen
+= ir
->coordinate
->type
->vector_elements
* reg_width
;
1316 if (ir
->shadow_comparitor
) {
1317 mlen
= MAX2(mlen
, 1 + 4 * reg_width
);
1319 ir
->shadow_comparitor
->accept(this);
1320 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1324 fs_inst
*inst
= NULL
;
1327 inst
= emit(FS_OPCODE_TEX
, dst
);
1330 ir
->lod_info
.bias
->accept(this);
1331 mlen
= MAX2(mlen
, 1 + 4 * reg_width
);
1332 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1335 inst
= emit(FS_OPCODE_TXB
, dst
);
1339 ir
->lod_info
.lod
->accept(this);
1340 mlen
= MAX2(mlen
, 1 + 4 * reg_width
);
1341 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, base_mrf
+ mlen
), this->result
);
1344 inst
= emit(FS_OPCODE_TXL
, dst
);
1348 assert(!"GLSL 1.30 features unsupported");
1351 inst
->base_mrf
= base_mrf
;
1355 fail("Message length >11 disallowed by hardware\n");
1362 fs_visitor::visit(ir_texture
*ir
)
1365 fs_inst
*inst
= NULL
;
1367 ir
->coordinate
->accept(this);
1368 fs_reg coordinate
= this->result
;
1370 if (ir
->offset
!= NULL
) {
1371 ir_constant
*offset
= ir
->offset
->as_constant();
1372 assert(offset
!= NULL
);
1374 signed char offsets
[3];
1375 for (unsigned i
= 0; i
< ir
->offset
->type
->vector_elements
; i
++)
1376 offsets
[i
] = (signed char) offset
->value
.i
[i
];
1378 /* Combine all three offsets into a single unsigned dword:
1380 * bits 11:8 - U Offset (X component)
1381 * bits 7:4 - V Offset (Y component)
1382 * bits 3:0 - R Offset (Z component)
1384 unsigned offset_bits
= 0;
1385 for (unsigned i
= 0; i
< ir
->offset
->type
->vector_elements
; i
++) {
1386 const unsigned shift
= 4 * (2 - i
);
1387 offset_bits
|= (offsets
[i
] << shift
) & (0xF << shift
);
1390 /* Explicitly set up the message header by copying g0 to msg reg m1. */
1391 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, 1, BRW_REGISTER_TYPE_UD
),
1392 fs_reg(GRF
, 0, BRW_REGISTER_TYPE_UD
));
1394 /* Then set the offset bits in DWord 2 of the message header. */
1395 emit(BRW_OPCODE_MOV
,
1396 fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE
, 1, 2),
1397 BRW_REGISTER_TYPE_UD
)),
1398 fs_reg(brw_imm_uw(offset_bits
)));
1401 /* Should be lowered by do_lower_texture_projection */
1402 assert(!ir
->projector
);
1404 sampler
= _mesa_get_sampler_uniform_value(ir
->sampler
,
1405 ctx
->Shader
.CurrentFragmentProgram
,
1406 &brw
->fragment_program
->Base
);
1407 sampler
= c
->fp
->program
.Base
.SamplerUnits
[sampler
];
1409 /* The 965 requires the EU to do the normalization of GL rectangle
1410 * texture coordinates. We use the program parameter state
1411 * tracking to get the scaling factor.
1413 if (ir
->sampler
->type
->sampler_dimensionality
== GLSL_SAMPLER_DIM_RECT
) {
1414 struct gl_program_parameter_list
*params
= c
->fp
->program
.Base
.Parameters
;
1415 int tokens
[STATE_LENGTH
] = {
1417 STATE_TEXRECT_SCALE
,
1423 if (c
->dispatch_width
== 16) {
1424 fail("rectangle scale uniform setup not supported on 16-wide\n");
1425 this->result
= fs_reg(this, ir
->type
);
1429 c
->prog_data
.param_convert
[c
->prog_data
.nr_params
] =
1431 c
->prog_data
.param_convert
[c
->prog_data
.nr_params
+ 1] =
1434 fs_reg scale_x
= fs_reg(UNIFORM
, c
->prog_data
.nr_params
);
1435 fs_reg scale_y
= fs_reg(UNIFORM
, c
->prog_data
.nr_params
+ 1);
1436 GLuint index
= _mesa_add_state_reference(params
,
1437 (gl_state_index
*)tokens
);
1439 this->param_index
[c
->prog_data
.nr_params
] = index
;
1440 this->param_offset
[c
->prog_data
.nr_params
] = 0;
1441 c
->prog_data
.nr_params
++;
1442 this->param_index
[c
->prog_data
.nr_params
] = index
;
1443 this->param_offset
[c
->prog_data
.nr_params
] = 1;
1444 c
->prog_data
.nr_params
++;
1446 fs_reg dst
= fs_reg(this, ir
->coordinate
->type
);
1447 fs_reg src
= coordinate
;
1450 emit(BRW_OPCODE_MUL
, dst
, src
, scale_x
);
1453 emit(BRW_OPCODE_MUL
, dst
, src
, scale_y
);
1456 /* Writemasking doesn't eliminate channels on SIMD8 texture
1457 * samples, so don't worry about them.
1459 fs_reg dst
= fs_reg(this, glsl_type::vec4_type
);
1461 if (intel
->gen
< 5) {
1462 inst
= emit_texture_gen4(ir
, dst
, coordinate
);
1464 inst
= emit_texture_gen5(ir
, dst
, coordinate
);
1467 /* If there's an offset, we already set up m1. To avoid the implied move,
1468 * use the null register. Otherwise, we want an implied move from g0.
1470 if (ir
->offset
!= NULL
)
1471 inst
->src
[0] = fs_reg(brw_null_reg());
1473 inst
->src
[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW
));
1475 inst
->sampler
= sampler
;
1479 if (ir
->shadow_comparitor
)
1480 inst
->shadow_compare
= true;
1482 if (ir
->type
== glsl_type::float_type
) {
1483 /* Ignore DEPTH_TEXTURE_MODE swizzling. */
1484 assert(ir
->sampler
->type
->sampler_shadow
);
1485 } else if (c
->key
.tex_swizzles
[inst
->sampler
] != SWIZZLE_NOOP
) {
1486 fs_reg swizzle_dst
= fs_reg(this, glsl_type::vec4_type
);
1488 for (int i
= 0; i
< 4; i
++) {
1489 int swiz
= GET_SWZ(c
->key
.tex_swizzles
[inst
->sampler
], i
);
1490 fs_reg l
= swizzle_dst
;
1493 if (swiz
== SWIZZLE_ZERO
) {
1494 emit(BRW_OPCODE_MOV
, l
, fs_reg(0.0f
));
1495 } else if (swiz
== SWIZZLE_ONE
) {
1496 emit(BRW_OPCODE_MOV
, l
, fs_reg(1.0f
));
1499 r
.reg_offset
+= GET_SWZ(c
->key
.tex_swizzles
[inst
->sampler
], i
);
1500 emit(BRW_OPCODE_MOV
, l
, r
);
1503 this->result
= swizzle_dst
;
1508 fs_visitor::visit(ir_swizzle
*ir
)
1510 ir
->val
->accept(this);
1511 fs_reg val
= this->result
;
1513 if (ir
->type
->vector_elements
== 1) {
1514 this->result
.reg_offset
+= ir
->mask
.x
;
1518 fs_reg result
= fs_reg(this, ir
->type
);
1519 this->result
= result
;
1521 for (unsigned int i
= 0; i
< ir
->type
->vector_elements
; i
++) {
1522 fs_reg channel
= val
;
1540 channel
.reg_offset
+= swiz
;
1541 emit(BRW_OPCODE_MOV
, result
, channel
);
1542 result
.reg_offset
++;
1547 fs_visitor::visit(ir_discard
*ir
)
1549 fs_reg temp
= fs_reg(this, glsl_type::uint_type
);
1551 assert(ir
->condition
== NULL
); /* FINISHME */
1553 emit(FS_OPCODE_DISCARD_NOT
, temp
, reg_null_d
);
1554 emit(FS_OPCODE_DISCARD_AND
, reg_null_d
, temp
);
1555 kill_emitted
= true;
1559 fs_visitor::visit(ir_constant
*ir
)
1561 /* Set this->result to reg at the bottom of the function because some code
1562 * paths will cause this visitor to be applied to other fields. This will
1563 * cause the value stored in this->result to be modified.
1565 * Make reg constant so that it doesn't get accidentally modified along the
1566 * way. Yes, I actually had this problem. :(
1568 const fs_reg
reg(this, ir
->type
);
1569 fs_reg dst_reg
= reg
;
1571 if (ir
->type
->is_array()) {
1572 const unsigned size
= type_size(ir
->type
->fields
.array
);
1574 for (unsigned i
= 0; i
< ir
->type
->length
; i
++) {
1575 ir
->array_elements
[i
]->accept(this);
1576 fs_reg src_reg
= this->result
;
1578 dst_reg
.type
= src_reg
.type
;
1579 for (unsigned j
= 0; j
< size
; j
++) {
1580 emit(BRW_OPCODE_MOV
, dst_reg
, src_reg
);
1581 src_reg
.reg_offset
++;
1582 dst_reg
.reg_offset
++;
1585 } else if (ir
->type
->is_record()) {
1586 foreach_list(node
, &ir
->components
) {
1587 ir_instruction
*const field
= (ir_instruction
*) node
;
1588 const unsigned size
= type_size(field
->type
);
1590 field
->accept(this);
1591 fs_reg src_reg
= this->result
;
1593 dst_reg
.type
= src_reg
.type
;
1594 for (unsigned j
= 0; j
< size
; j
++) {
1595 emit(BRW_OPCODE_MOV
, dst_reg
, src_reg
);
1596 src_reg
.reg_offset
++;
1597 dst_reg
.reg_offset
++;
1601 const unsigned size
= type_size(ir
->type
);
1603 for (unsigned i
= 0; i
< size
; i
++) {
1604 switch (ir
->type
->base_type
) {
1605 case GLSL_TYPE_FLOAT
:
1606 emit(BRW_OPCODE_MOV
, dst_reg
, fs_reg(ir
->value
.f
[i
]));
1608 case GLSL_TYPE_UINT
:
1609 emit(BRW_OPCODE_MOV
, dst_reg
, fs_reg(ir
->value
.u
[i
]));
1612 emit(BRW_OPCODE_MOV
, dst_reg
, fs_reg(ir
->value
.i
[i
]));
1614 case GLSL_TYPE_BOOL
:
1615 emit(BRW_OPCODE_MOV
, dst_reg
, fs_reg((int)ir
->value
.b
[i
]));
1618 assert(!"Non-float/uint/int/bool constant");
1620 dst_reg
.reg_offset
++;
1628 fs_visitor::emit_bool_to_cond_code(ir_rvalue
*ir
)
1630 ir_expression
*expr
= ir
->as_expression();
1636 assert(expr
->get_num_operands() <= 2);
1637 for (unsigned int i
= 0; i
< expr
->get_num_operands(); i
++) {
1638 assert(expr
->operands
[i
]->type
->is_scalar());
1640 expr
->operands
[i
]->accept(this);
1641 op
[i
] = this->result
;
1644 switch (expr
->operation
) {
1645 case ir_unop_logic_not
:
1646 inst
= emit(BRW_OPCODE_AND
, reg_null_d
, op
[0], fs_reg(1));
1647 inst
->conditional_mod
= BRW_CONDITIONAL_Z
;
1650 case ir_binop_logic_xor
:
1651 inst
= emit(BRW_OPCODE_XOR
, reg_null_d
, op
[0], op
[1]);
1652 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1655 case ir_binop_logic_or
:
1656 inst
= emit(BRW_OPCODE_OR
, reg_null_d
, op
[0], op
[1]);
1657 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1660 case ir_binop_logic_and
:
1661 inst
= emit(BRW_OPCODE_AND
, reg_null_d
, op
[0], op
[1]);
1662 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1666 if (intel
->gen
>= 6) {
1667 inst
= emit(BRW_OPCODE_CMP
, reg_null_d
, op
[0], fs_reg(0.0f
));
1669 inst
= emit(BRW_OPCODE_MOV
, reg_null_f
, op
[0]);
1671 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1675 if (intel
->gen
>= 6) {
1676 inst
= emit(BRW_OPCODE_CMP
, reg_null_d
, op
[0], fs_reg(0));
1678 inst
= emit(BRW_OPCODE_MOV
, reg_null_d
, op
[0]);
1680 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1683 case ir_binop_greater
:
1684 case ir_binop_gequal
:
1686 case ir_binop_lequal
:
1687 case ir_binop_equal
:
1688 case ir_binop_all_equal
:
1689 case ir_binop_nequal
:
1690 case ir_binop_any_nequal
:
1691 inst
= emit(BRW_OPCODE_CMP
, reg_null_cmp
, op
[0], op
[1]);
1692 inst
->conditional_mod
=
1693 brw_conditional_for_comparison(expr
->operation
);
1697 assert(!"not reached");
1698 fail("bad cond code\n");
1706 if (intel
->gen
>= 6) {
1707 fs_inst
*inst
= emit(BRW_OPCODE_AND
, reg_null_d
, this->result
, fs_reg(1));
1708 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1710 fs_inst
*inst
= emit(BRW_OPCODE_MOV
, reg_null_d
, this->result
);
1711 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1716 * Emit a gen6 IF statement with the comparison folded into the IF
1720 fs_visitor::emit_if_gen6(ir_if
*ir
)
1722 ir_expression
*expr
= ir
->condition
->as_expression();
1729 assert(expr
->get_num_operands() <= 2);
1730 for (unsigned int i
= 0; i
< expr
->get_num_operands(); i
++) {
1731 assert(expr
->operands
[i
]->type
->is_scalar());
1733 expr
->operands
[i
]->accept(this);
1734 op
[i
] = this->result
;
1737 switch (expr
->operation
) {
1738 case ir_unop_logic_not
:
1739 inst
= emit(BRW_OPCODE_IF
, temp
, op
[0], fs_reg(0));
1740 inst
->conditional_mod
= BRW_CONDITIONAL_Z
;
1743 case ir_binop_logic_xor
:
1744 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, op
[0], op
[1]);
1745 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1748 case ir_binop_logic_or
:
1749 temp
= fs_reg(this, glsl_type::bool_type
);
1750 emit(BRW_OPCODE_OR
, temp
, op
[0], op
[1]);
1751 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, temp
, fs_reg(0));
1752 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1755 case ir_binop_logic_and
:
1756 temp
= fs_reg(this, glsl_type::bool_type
);
1757 emit(BRW_OPCODE_AND
, temp
, op
[0], op
[1]);
1758 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, temp
, fs_reg(0));
1759 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1763 inst
= emit(BRW_OPCODE_IF
, reg_null_f
, op
[0], fs_reg(0));
1764 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1768 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, op
[0], fs_reg(0));
1769 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1772 case ir_binop_greater
:
1773 case ir_binop_gequal
:
1775 case ir_binop_lequal
:
1776 case ir_binop_equal
:
1777 case ir_binop_all_equal
:
1778 case ir_binop_nequal
:
1779 case ir_binop_any_nequal
:
1780 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, op
[0], op
[1]);
1781 inst
->conditional_mod
=
1782 brw_conditional_for_comparison(expr
->operation
);
1785 assert(!"not reached");
1786 inst
= emit(BRW_OPCODE_IF
, reg_null_d
, op
[0], fs_reg(0));
1787 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1788 fail("bad condition\n");
1794 ir
->condition
->accept(this);
1796 fs_inst
*inst
= emit(BRW_OPCODE_IF
, reg_null_d
, this->result
, fs_reg(0));
1797 inst
->conditional_mod
= BRW_CONDITIONAL_NZ
;
1801 fs_visitor::visit(ir_if
*ir
)
1805 if (c
->dispatch_width
== 16) {
1806 fail("Can't support (non-uniform) control flow on 16-wide\n");
1809 /* Don't point the annotation at the if statement, because then it plus
1810 * the then and else blocks get printed.
1812 this->base_ir
= ir
->condition
;
1814 if (intel
->gen
>= 6) {
1817 emit_bool_to_cond_code(ir
->condition
);
1819 inst
= emit(BRW_OPCODE_IF
);
1820 inst
->predicated
= true;
1823 foreach_iter(exec_list_iterator
, iter
, ir
->then_instructions
) {
1824 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
1830 if (!ir
->else_instructions
.is_empty()) {
1831 emit(BRW_OPCODE_ELSE
);
1833 foreach_iter(exec_list_iterator
, iter
, ir
->else_instructions
) {
1834 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
1841 emit(BRW_OPCODE_ENDIF
);
1845 fs_visitor::visit(ir_loop
*ir
)
1847 fs_reg counter
= reg_undef
;
1849 if (c
->dispatch_width
== 16) {
1850 fail("Can't support (non-uniform) control flow on 16-wide\n");
1854 this->base_ir
= ir
->counter
;
1855 ir
->counter
->accept(this);
1856 counter
= *(variable_storage(ir
->counter
));
1859 this->base_ir
= ir
->from
;
1860 ir
->from
->accept(this);
1862 emit(BRW_OPCODE_MOV
, counter
, this->result
);
1866 emit(BRW_OPCODE_DO
);
1869 this->base_ir
= ir
->to
;
1870 ir
->to
->accept(this);
1872 fs_inst
*inst
= emit(BRW_OPCODE_CMP
, reg_null_cmp
, counter
, this->result
);
1873 inst
->conditional_mod
= brw_conditional_for_comparison(ir
->cmp
);
1875 inst
= emit(BRW_OPCODE_BREAK
);
1876 inst
->predicated
= true;
1879 foreach_iter(exec_list_iterator
, iter
, ir
->body_instructions
) {
1880 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
1886 if (ir
->increment
) {
1887 this->base_ir
= ir
->increment
;
1888 ir
->increment
->accept(this);
1889 emit(BRW_OPCODE_ADD
, counter
, counter
, this->result
);
1892 emit(BRW_OPCODE_WHILE
);
1896 fs_visitor::visit(ir_loop_jump
*ir
)
1899 case ir_loop_jump::jump_break
:
1900 emit(BRW_OPCODE_BREAK
);
1902 case ir_loop_jump::jump_continue
:
1903 emit(BRW_OPCODE_CONTINUE
);
1909 fs_visitor::visit(ir_call
*ir
)
1911 assert(!"FINISHME");
1915 fs_visitor::visit(ir_return
*ir
)
1917 assert(!"FINISHME");
1921 fs_visitor::visit(ir_function
*ir
)
1923 /* Ignore function bodies other than main() -- we shouldn't see calls to
1924 * them since they should all be inlined before we get to ir_to_mesa.
1926 if (strcmp(ir
->name
, "main") == 0) {
1927 const ir_function_signature
*sig
;
1930 sig
= ir
->matching_signature(&empty
);
1934 foreach_iter(exec_list_iterator
, iter
, sig
->body
) {
1935 ir_instruction
*ir
= (ir_instruction
*)iter
.get();
1944 fs_visitor::visit(ir_function_signature
*ir
)
1946 assert(!"not reached");
1951 fs_visitor::emit(fs_inst inst
)
1953 fs_inst
*list_inst
= new(mem_ctx
) fs_inst
;
1956 if (force_uncompressed_stack
> 0)
1957 list_inst
->force_uncompressed
= true;
1958 else if (force_sechalf_stack
> 0)
1959 list_inst
->force_sechalf
= true;
1961 list_inst
->annotation
= this->current_annotation
;
1962 list_inst
->ir
= this->base_ir
;
1964 this->instructions
.push_tail(list_inst
);
1969 /** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
1971 fs_visitor::emit_dummy_fs()
1973 /* Everyone's favorite color. */
1974 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, 2), fs_reg(1.0f
));
1975 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, 3), fs_reg(0.0f
));
1976 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, 4), fs_reg(1.0f
));
1977 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, 5), fs_reg(0.0f
));
1980 write
= emit(FS_OPCODE_FB_WRITE
, fs_reg(0), fs_reg(0));
1981 write
->base_mrf
= 0;
1984 /* The register location here is relative to the start of the URB
1985 * data. It will get adjusted to be a real location before
1986 * generate_code() time.
1989 fs_visitor::interp_reg(int location
, int channel
)
1991 int regnr
= urb_setup
[location
] * 2 + channel
/ 2;
1992 int stride
= (channel
& 1) * 4;
1994 assert(urb_setup
[location
] != -1);
1996 return brw_vec1_grf(regnr
, stride
);
1999 /** Emits the interpolation for the varying inputs. */
2001 fs_visitor::emit_interpolation_setup_gen4()
2003 this->current_annotation
= "compute pixel centers";
2004 this->pixel_x
= fs_reg(this, glsl_type::uint_type
);
2005 this->pixel_y
= fs_reg(this, glsl_type::uint_type
);
2006 this->pixel_x
.type
= BRW_REGISTER_TYPE_UW
;
2007 this->pixel_y
.type
= BRW_REGISTER_TYPE_UW
;
2009 emit(FS_OPCODE_PIXEL_X
, this->pixel_x
);
2010 emit(FS_OPCODE_PIXEL_Y
, this->pixel_y
);
2012 this->current_annotation
= "compute pixel deltas from v0";
2014 this->delta_x
= fs_reg(this, glsl_type::vec2_type
);
2015 this->delta_y
= this->delta_x
;
2016 this->delta_y
.reg_offset
++;
2018 this->delta_x
= fs_reg(this, glsl_type::float_type
);
2019 this->delta_y
= fs_reg(this, glsl_type::float_type
);
2021 emit(BRW_OPCODE_ADD
, this->delta_x
,
2022 this->pixel_x
, fs_reg(negate(brw_vec1_grf(1, 0))));
2023 emit(BRW_OPCODE_ADD
, this->delta_y
,
2024 this->pixel_y
, fs_reg(negate(brw_vec1_grf(1, 1))));
2026 this->current_annotation
= "compute pos.w and 1/pos.w";
2027 /* Compute wpos.w. It's always in our setup, since it's needed to
2028 * interpolate the other attributes.
2030 this->wpos_w
= fs_reg(this, glsl_type::float_type
);
2031 emit(FS_OPCODE_LINTERP
, wpos_w
, this->delta_x
, this->delta_y
,
2032 interp_reg(FRAG_ATTRIB_WPOS
, 3));
2033 /* Compute the pixel 1/W value from wpos.w. */
2034 this->pixel_w
= fs_reg(this, glsl_type::float_type
);
2035 emit_math(FS_OPCODE_RCP
, this->pixel_w
, wpos_w
);
2036 this->current_annotation
= NULL
;
2039 /** Emits the interpolation for the varying inputs. */
2041 fs_visitor::emit_interpolation_setup_gen6()
2043 struct brw_reg g1_uw
= retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW
);
2045 /* If the pixel centers end up used, the setup is the same as for gen4. */
2046 this->current_annotation
= "compute pixel centers";
2047 fs_reg int_pixel_x
= fs_reg(this, glsl_type::uint_type
);
2048 fs_reg int_pixel_y
= fs_reg(this, glsl_type::uint_type
);
2049 int_pixel_x
.type
= BRW_REGISTER_TYPE_UW
;
2050 int_pixel_y
.type
= BRW_REGISTER_TYPE_UW
;
2051 emit(BRW_OPCODE_ADD
,
2053 fs_reg(stride(suboffset(g1_uw
, 4), 2, 4, 0)),
2054 fs_reg(brw_imm_v(0x10101010)));
2055 emit(BRW_OPCODE_ADD
,
2057 fs_reg(stride(suboffset(g1_uw
, 5), 2, 4, 0)),
2058 fs_reg(brw_imm_v(0x11001100)));
2060 /* As of gen6, we can no longer mix float and int sources. We have
2061 * to turn the integer pixel centers into floats for their actual
2064 this->pixel_x
= fs_reg(this, glsl_type::float_type
);
2065 this->pixel_y
= fs_reg(this, glsl_type::float_type
);
2066 emit(BRW_OPCODE_MOV
, this->pixel_x
, int_pixel_x
);
2067 emit(BRW_OPCODE_MOV
, this->pixel_y
, int_pixel_y
);
2069 this->current_annotation
= "compute pos.w";
2070 this->pixel_w
= fs_reg(brw_vec8_grf(c
->source_w_reg
, 0));
2071 this->wpos_w
= fs_reg(this, glsl_type::float_type
);
2072 emit_math(FS_OPCODE_RCP
, this->wpos_w
, this->pixel_w
);
2074 this->delta_x
= fs_reg(brw_vec8_grf(2, 0));
2075 this->delta_y
= fs_reg(brw_vec8_grf(3, 0));
2077 this->current_annotation
= NULL
;
2081 fs_visitor::emit_color_write(int index
, int first_color_mrf
, fs_reg color
)
2083 int reg_width
= c
->dispatch_width
/ 8;
2085 if (c
->dispatch_width
== 8 || intel
->gen
== 6) {
2086 /* SIMD8 write looks like:
2092 * gen6 SIMD16 DP write looks like:
2102 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, first_color_mrf
+ index
* reg_width
),
2105 /* pre-gen6 SIMD16 single source DP write looks like:
2115 if (brw
->has_compr4
) {
2116 /* By setting the high bit of the MRF register number, we
2117 * indicate that we want COMPR4 mode - instead of doing the
2118 * usual destination + 1 for the second half we get
2121 emit(BRW_OPCODE_MOV
,
2122 fs_reg(MRF
, BRW_MRF_COMPR4
+ first_color_mrf
+ index
), color
);
2124 push_force_uncompressed();
2125 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, first_color_mrf
+ index
), color
);
2126 pop_force_uncompressed();
2128 push_force_sechalf();
2129 color
.sechalf
= true;
2130 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, first_color_mrf
+ index
+ 4), color
);
2131 pop_force_sechalf();
2132 color
.sechalf
= false;
2138 fs_visitor::emit_fb_writes()
2140 this->current_annotation
= "FB write header";
2141 GLboolean header_present
= GL_TRUE
;
2143 int reg_width
= c
->dispatch_width
/ 8;
2145 if (intel
->gen
>= 6 &&
2146 !this->kill_emitted
&&
2147 c
->key
.nr_color_regions
== 1) {
2148 header_present
= false;
2151 if (header_present
) {
2156 if (c
->aa_dest_stencil_reg
) {
2157 push_force_uncompressed();
2158 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, nr
++),
2159 fs_reg(brw_vec8_grf(c
->aa_dest_stencil_reg
, 0)));
2160 pop_force_uncompressed();
2163 /* Reserve space for color. It'll be filled in per MRT below. */
2165 nr
+= 4 * reg_width
;
2167 if (c
->source_depth_to_render_target
) {
2168 if (intel
->gen
== 6 && c
->dispatch_width
== 16) {
2169 /* For outputting oDepth on gen6, SIMD8 writes have to be
2170 * used. This would require 8-wide moves of each half to
2171 * message regs, kind of like pre-gen5 SIMD16 FB writes.
2172 * Just bail on doing so for now.
2174 fail("Missing support for simd16 depth writes on gen6\n");
2177 if (c
->computes_depth
) {
2178 /* Hand over gl_FragDepth. */
2179 assert(this->frag_depth
);
2180 fs_reg depth
= *(variable_storage(this->frag_depth
));
2182 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, nr
), depth
);
2184 /* Pass through the payload depth. */
2185 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, nr
),
2186 fs_reg(brw_vec8_grf(c
->source_depth_reg
, 0)));
2191 if (c
->dest_depth_reg
) {
2192 emit(BRW_OPCODE_MOV
, fs_reg(MRF
, nr
),
2193 fs_reg(brw_vec8_grf(c
->dest_depth_reg
, 0)));
2197 fs_reg color
= reg_undef
;
2198 if (this->frag_color
)
2199 color
= *(variable_storage(this->frag_color
));
2200 else if (this->frag_data
) {
2201 color
= *(variable_storage(this->frag_data
));
2202 color
.type
= BRW_REGISTER_TYPE_F
;
2205 for (int target
= 0; target
< c
->key
.nr_color_regions
; target
++) {
2206 this->current_annotation
= ralloc_asprintf(this->mem_ctx
,
2207 "FB write target %d",
2209 if (this->frag_color
|| this->frag_data
) {
2210 for (int i
= 0; i
< 4; i
++) {
2211 emit_color_write(i
, color_mrf
, color
);
2216 if (this->frag_color
)
2217 color
.reg_offset
-= 4;
2219 fs_inst
*inst
= emit(FS_OPCODE_FB_WRITE
);
2220 inst
->target
= target
;
2223 if (target
== c
->key
.nr_color_regions
- 1)
2225 inst
->header_present
= header_present
;
2228 if (c
->key
.nr_color_regions
== 0) {
2229 if (c
->key
.alpha_test
&& (this->frag_color
|| this->frag_data
)) {
2230 /* If the alpha test is enabled but there's no color buffer,
2231 * we still need to send alpha out the pipeline to our null
2234 color
.reg_offset
+= 3;
2235 emit_color_write(3, color_mrf
, color
);
2238 fs_inst
*inst
= emit(FS_OPCODE_FB_WRITE
);
2242 inst
->header_present
= header_present
;
2245 this->current_annotation
= NULL
;
2249 fs_visitor::generate_fb_write(fs_inst
*inst
)
2251 GLboolean eot
= inst
->eot
;
2252 struct brw_reg implied_header
;
2254 /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
2257 brw_push_insn_state(p
);
2258 brw_set_mask_control(p
, BRW_MASK_DISABLE
);
2259 brw_set_compression_control(p
, BRW_COMPRESSION_NONE
);
2261 if (inst
->header_present
) {
2262 if (intel
->gen
>= 6) {
2264 brw_message_reg(inst
->base_mrf
),
2265 brw_vec8_grf(0, 0));
2267 if (inst
->target
> 0) {
2268 /* Set the render target index for choosing BLEND_STATE. */
2269 brw_MOV(p
, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE
, 0, 2),
2270 BRW_REGISTER_TYPE_UD
),
2271 brw_imm_ud(inst
->target
));
2274 /* Clear viewport index, render target array index. */
2275 brw_AND(p
, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE
, 0, 0),
2276 BRW_REGISTER_TYPE_UD
),
2277 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD
),
2278 brw_imm_ud(0xf7ff));
2280 implied_header
= brw_null_reg();
2282 implied_header
= retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW
);
2286 brw_message_reg(inst
->base_mrf
+ 1),
2287 brw_vec8_grf(1, 0));
2289 implied_header
= brw_null_reg();
2292 brw_pop_insn_state(p
);
2302 inst
->header_present
);
2305 /* Computes the integer pixel x,y values from the origin.
2307 * This is the basis of gl_FragCoord computation, but is also used
2308 * pre-gen6 for computing the deltas from v0 for computing
2312 fs_visitor::generate_pixel_xy(struct brw_reg dst
, bool is_x
)
2314 struct brw_reg g1_uw
= retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW
);
2316 struct brw_reg deltas
;
2319 src
= stride(suboffset(g1_uw
, 4), 2, 4, 0);
2320 deltas
= brw_imm_v(0x10101010);
2322 src
= stride(suboffset(g1_uw
, 5), 2, 4, 0);
2323 deltas
= brw_imm_v(0x11001100);
2326 if (c
->dispatch_width
== 16) {
2330 /* We do this 8 or 16-wide, but since the destination is UW we
2331 * don't do compression in the 16-wide case.
2333 brw_push_insn_state(p
);
2334 brw_set_compression_control(p
, BRW_COMPRESSION_NONE
);
2335 brw_ADD(p
, dst
, src
, deltas
);
2336 brw_pop_insn_state(p
);
2340 fs_visitor::generate_linterp(fs_inst
*inst
,
2341 struct brw_reg dst
, struct brw_reg
*src
)
2343 struct brw_reg delta_x
= src
[0];
2344 struct brw_reg delta_y
= src
[1];
2345 struct brw_reg interp
= src
[2];
2348 delta_y
.nr
== delta_x
.nr
+ 1 &&
2349 (intel
->gen
>= 6 || (delta_x
.nr
& 1) == 0)) {
2350 brw_PLN(p
, dst
, interp
, delta_x
);
2352 brw_LINE(p
, brw_null_reg(), interp
, delta_x
);
2353 brw_MAC(p
, dst
, suboffset(interp
, 1), delta_y
);
void
fs_visitor::generate_math(fs_inst *inst,
                          struct brw_reg dst, struct brw_reg *src)
{
   int op;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
      op = BRW_MATH_FUNCTION_INV;
      break;
   case FS_OPCODE_RSQ:
      op = BRW_MATH_FUNCTION_RSQ;
      break;
   case FS_OPCODE_SQRT:
      op = BRW_MATH_FUNCTION_SQRT;
      break;
   case FS_OPCODE_EXP2:
      op = BRW_MATH_FUNCTION_EXP;
      break;
   case FS_OPCODE_LOG2:
      op = BRW_MATH_FUNCTION_LOG;
      break;
   case FS_OPCODE_POW:
      op = BRW_MATH_FUNCTION_POW;
      break;
   case FS_OPCODE_SIN:
      op = BRW_MATH_FUNCTION_SIN;
      break;
   case FS_OPCODE_COS:
      op = BRW_MATH_FUNCTION_COS;
      break;
   default:
      assert(!"not reached: unknown math function");
      break;
   }

   if (intel->gen >= 6) {
      assert(inst->mlen == 0);

      if (inst->opcode == FS_OPCODE_POW) {
         brw_set_compression_control(p, BRW_COMPRESSION_NONE);
         brw_math2(p, dst, op, src[0], src[1]);

         if (c->dispatch_width == 16) {
            brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
            brw_math2(p, sechalf(dst), op, sechalf(src[0]), sechalf(src[1]));
            brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         }
      } else {
         brw_set_compression_control(p, BRW_COMPRESSION_NONE);
         brw_math(p, dst,
                  op,
                  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
                  BRW_MATH_SATURATE_NONE,
                  0, src[0],
                  BRW_MATH_DATA_VECTOR,
                  BRW_MATH_PRECISION_FULL);

         if (c->dispatch_width == 16) {
            brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
            brw_math(p, sechalf(dst),
                     op,
                     inst->saturate ? BRW_MATH_SATURATE_SATURATE :
                     BRW_MATH_SATURATE_NONE,
                     0, sechalf(src[0]),
                     BRW_MATH_DATA_VECTOR,
                     BRW_MATH_PRECISION_FULL);
            brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         }
      }
   } else /* gen <= 5 */ {
      assert(inst->mlen >= 1);

      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_math(p, dst,
               op,
               inst->saturate ? BRW_MATH_SATURATE_SATURATE :
               BRW_MATH_SATURATE_NONE,
               inst->base_mrf, src[0],
               BRW_MATH_DATA_VECTOR,
               BRW_MATH_PRECISION_FULL);

      if (c->dispatch_width == 16) {
         brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
         brw_math(p, sechalf(dst),
                  op,
                  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
                  BRW_MATH_SATURATE_NONE,
                  inst->base_mrf + 1, sechalf(src[0]),
                  BRW_MATH_DATA_VECTOR,
                  BRW_MATH_PRECISION_FULL);
         brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
      }
   }
}
void
fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   int msg_type = -1;
   int rlen = 4;
   uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;

   if (c->dispatch_width == 16) {
      rlen = 8;
      dst = vec16(dst);
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   }

   if (intel->gen >= 5) {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
         }
         break;
      case FS_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case FS_OPCODE_TXD:
         assert(!"TXD isn't supported on gen5+ yet.");
         break;
      }
   } else {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         /* Note that G45 and older determines shadow compare and dispatch width
          * from message length for most messages.
          */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
         } else {
            assert(inst->mlen <= 4);
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case FS_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case FS_OPCODE_TXD:
         assert(!"TXD isn't supported on gen4 yet.");
         break;
      }
   }
   assert(msg_type != -1);

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              SURF_INDEX_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              rlen,
              inst->mlen,
              0,
              1,
              simd_mode);
}
/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * and we're trying to produce:
 *
 *            DDX                    DDY
 * dst: (ss0.tr - ss0.tl)    (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)    (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)    (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)    (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)    (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)    (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)    (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)    (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz = 0 gets us the same
 * result for each pair, and vertstride = 2 jumps us 2 elements after
 * processing a pair.  But for DDY, it's harder, as we want to produce the
 * pairs swizzled between each other.  We could probably do it like ddx and
 * swizzle the right order later, but bail for now and just produce
 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
 */
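/* Region sketch for the DDX case below: src0 starts at subregister 1 with
 * vstride = 2, width = 2, hstride = 0, so it reads ss0.tr, ss0.tr, ss0.br,
 * ss0.br, ...; src1 is the same region anchored at subregister 0 (ss0.tl,
 * ss0.tl, ss0.bl, ss0.bl, ...).  A single ADD of src0 and -src1 therefore
 * yields the per-pixel horizontal differences from the table above.
 */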
void
fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
void
fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
void
fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
{
   if (intel->gen >= 6) {
      /* Gen6 no longer has the mask reg for us to just read the
       * active channels from.  However, cmp updates just the channels
       * of the flag reg that are enabled, so we can get at the
       * channel enables that way.  In this step, make a reg of ones
       * we'll compare to.
       */
      brw_MOV(p, mask, brw_imm_ud(1));
   } else {
      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
      brw_pop_insn_state(p);
   }
}
void
fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
{
   if (intel->gen >= 6) {
      struct brw_reg f0 = brw_flag_reg();
      struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_MOV(p, f0, brw_imm_uw(0xffff)); /* inactive channels undiscarded */
      brw_pop_insn_state(p);

      brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
              BRW_CONDITIONAL_Z, mask, brw_imm_ud(0)); /* active channels fail test */
      /* Undo CMP's whacking of predication */
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_AND(p, g1, f0, g1);
      brw_pop_insn_state(p);
   } else {
      struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

      mask = brw_uw1_reg(mask.file, mask.nr, 0);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      brw_AND(p, g0, mask, g0);
      brw_pop_insn_state(p);
   }
}
void
fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
{
   assert(inst->mlen != 0);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
           retype(src, BRW_REGISTER_TYPE_UD));
   brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
                                 inst->offset);
}
void
fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->mlen != 0);

   /* Clear any post destination dependencies that would be ignored by
    * the block read.  See the B-Spec for pre-gen5 send instruction.
    *
    * This could use a better solution, since texture sampling and
    * math reads could potentially run into it as well -- anywhere
    * that we have a SEND with a destination that is a register that
    * was written but not read within the last N instructions (what's
    * N?  unsure).  This is rare because of dead code elimination, but
    * not unheard of.
    */
   if (intel->gen == 4 && !intel->is_g4x)
      brw_MOV(p, brw_null_reg(), dst);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
                                inst->offset);

   if (intel->gen == 4 && !intel->is_g4x) {
      /* gen4 errata: destination from a send can't be used as a
       * destination until it's been read.  Just read it so we don't
       * have to worry.
       */
      brw_MOV(p, brw_null_reg(), dst);
   }
}
void
fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->mlen != 0);

   /* Clear any post destination dependencies that would be ignored by
    * the block read.  See the B-Spec for pre-gen5 send instruction.
    *
    * This could use a better solution, since texture sampling and
    * math reads could potentially run into it as well -- anywhere
    * that we have a SEND with a destination that is a register that
    * was written but not read within the last N instructions (what's
    * N?  unsure).  This is rare because of dead code elimination, but
    * not unheard of.
    */
   if (intel->gen == 4 && !intel->is_g4x)
      brw_MOV(p, brw_null_reg(), dst);

   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);

   if (intel->gen == 4 && !intel->is_g4x) {
      /* gen4 errata: destination from a send can't be used as a
       * destination until it's been read.  Just read it so we don't
       * have to worry.
       */
      brw_MOV(p, brw_null_reg(), dst);
   }
}
/**
 * To be called after the last _mesa_add_state_reference() call, to
 * set up prog_data.param[] for assign_curb_setup() and
 * setup_pull_constants().
 */
void
fs_visitor::setup_paramvalues_refs()
{
   if (c->dispatch_width != 8)
      return;

   /* Set up the pointers to ParamValues now that that array is finalized. */
   for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
      c->prog_data.param[i] =
         fp->Base.Parameters->ParameterValues[this->param_index[i]] +
         this->param_offset[i];
   }
}
void
fs_visitor::assign_curb_setup()
{
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
   if (c->dispatch_width == 8) {
      c->prog_data.first_curbe_grf = c->nr_payload_regs;
   } else {
      c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
   }

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
         }
      }
   }
}
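/* Concretely, with eight float components to a register: if there are two
 * payload registers, the uniform at constant_nr 10 lands one register past
 * the payload at element 2, i.e. the fixed hardware register g3.2.
 */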
void
fs_visitor::calculate_urb_setup()
{
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      urb_setup[i] = -1;
   }

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (intel->gen >= 6) {
      for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
         if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
         }
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
         if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
            int fp_index = -1;

            if (i >= VERT_RESULT_VAR0)
               fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
            else if (i <= VERT_RESULT_TEX7)
               fp_index = i;

            if (fp_index >= 0)
               urb_setup[fp_index] = urb_next++;
         }
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   c->prog_data.urb_read_length = urb_next * 2;
}
void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;

   /* Offset all the urb_setup[] index by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[2].file == FIXED_HW_REG);
         inst->src[2].fixed_hw_reg.nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_HW_REG);
         inst->src[0].fixed_hw_reg.nr += urb_start;
      }
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}
/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This is mostly duplicated with what brw_fs_vector_splitting does,
 * but that's really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization.  We can do it once here, safely.  This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs.  If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
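/* For example, a three-register virtual GRF vgrf10 that is only ever
 * addressed one register at a time keeps its name for reg_offset 0 and gets
 * two freshly allocated single-register GRFs for offsets 1 and 2; the
 * rewrite pass below then points every use at the corresponding new
 * register with reg_offset 0.
 */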
void
fs_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_next;
   bool split_grf[num_vars];
   int new_virtual_grf[num_vars];

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] != 1)
         split_grf[i] = true;
      else
         split_grf[i] = false;
   }

   /* PLN opcodes rely on the delta_xy being contiguous. */
   split_grf[this->delta_x.reg] = false;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      /* Texturing produces 4 contiguous registers, so no splitting. */
      if (inst->is_tex()) {
         split_grf[inst->dst.reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (split_grf[i]) {
         new_virtual_grf[i] = virtual_grf_alloc(1);
         for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
            int reg = virtual_grf_alloc(1);
            assert(reg == new_virtual_grf[i] + j - 1);
         }
         this->virtual_grf_sizes[i] = 1;
      }
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF &&
          split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   this->live_intervals_valid = false;
}
/**
 * Choose accesses from the UNIFORM file to demote to using the pull
 * constant buffer.
 *
 * We allow a fragment shader to have more than the specified minimum
 * maximum number of fragment shader uniform components (64).  If
 * there are too many of these, they'd fill up all of register space.
 * So, this will push some of them out to the pull constant buffer and
 * update the program to load them.
 */
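/* For example, a shader declaring uniform vec4 data[40] needs 160 float
 * components: with the 16-register (128-component) push limit below, the
 * first 128 stay in the CURBE and the remaining 32 are fetched at their use
 * sites through FS_OPCODE_PULL_CONSTANT_LOAD instructions.
 */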
void
fs_visitor::setup_pull_constants()
{
   /* Only allow 16 registers (128 uniform components) as push constants. */
   unsigned int max_uniform_components = 16 * 8;
   if (c->prog_data.nr_params <= max_uniform_components)
      return;

   if (c->dispatch_width == 16) {
      fail("Pull constants not supported in 16-wide\n");
      return;
   }

   /* Just demote the end of the list.  We could probably do better
    * here, demoting things that are rarely used in the program first.
    */
   int pull_uniform_base = max_uniform_components;
   int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
         if (uniform_nr < pull_uniform_base)
            continue;

         fs_reg dst = fs_reg(this, glsl_type::float_type);
         fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
                                              dst);
         pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
         pull->ir = inst->ir;
         pull->annotation = inst->annotation;
         pull->base_mrf = 14;
         pull->mlen = 1;

         inst->insert_before(pull);

         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;
         inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
      }
   }

   for (int i = 0; i < pull_uniform_count; i++) {
      c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
      c->prog_data.pull_param_convert[i] =
         c->prog_data.param_convert[pull_uniform_base + i];
   }
   c->prog_data.nr_params -= pull_uniform_count;
   c->prog_data.nr_pull_params = pull_uniform_count;
}
void
fs_visitor::calculate_live_intervals()
{
   int num_vars = this->virtual_grf_next;
   int *def = ralloc_array(mem_ctx, int, num_vars);
   int *use = ralloc_array(mem_ctx, int, num_vars);
   int loop_depth = 0;
   int loop_start = 0;
   int bb_header_ip = 0;

   if (this->live_intervals_valid)
      return;

   for (int i = 0; i < num_vars; i++) {
      def[i] = MAX_INSTRUCTION;
      use[i] = -1;
   }

   int ip = 0;
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode == BRW_OPCODE_DO) {
         if (loop_depth++ == 0)
            loop_start = ip;
      } else if (inst->opcode == BRW_OPCODE_WHILE) {
         loop_depth--;

         if (loop_depth == 0) {
            /* Patches up the use of vars marked for being live across
             * the whole loop.
             */
            for (int i = 0; i < num_vars; i++) {
               if (use[i] == loop_start) {
                  use[i] = ip;
               }
            }
         }
      } else {
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
               int reg = inst->src[i].reg;

               if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
                                   def[reg] >= bb_header_ip)) {
                  use[reg] = ip;
               } else {
                  def[reg] = MIN2(loop_start, def[reg]);
                  use[reg] = loop_start;

                  /* Nobody else is going to go smash our start to
                   * later in the loop now, because def[reg] now
                   * points before the bb header.
                   */
               }
            }
         }
         if (inst->dst.file == GRF && inst->dst.reg != 0) {
            int reg = inst->dst.reg;

            if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
                                !inst->predicated)) {
               def[reg] = MIN2(def[reg], ip);
            } else {
               def[reg] = MIN2(def[reg], loop_start);
            }
         }
      }

      ip++;

      /* Set the basic block header IP.  This is used for determining
       * if a complete def of single-register virtual GRF in a loop
       * dominates a use in the same basic block.  It's a quick way to
       * reduce the live interval range of most register used in a
       * loop.
       */
      if (inst->opcode == BRW_OPCODE_IF ||
          inst->opcode == BRW_OPCODE_ELSE ||
          inst->opcode == BRW_OPCODE_ENDIF ||
          inst->opcode == BRW_OPCODE_DO ||
          inst->opcode == BRW_OPCODE_WHILE ||
          inst->opcode == BRW_OPCODE_BREAK ||
          inst->opcode == BRW_OPCODE_CONTINUE) {
         bb_header_ip = ip;
      }
   }

   ralloc_free(this->virtual_grf_def);
   ralloc_free(this->virtual_grf_use);
   this->virtual_grf_def = def;
   this->virtual_grf_use = use;

   this->live_intervals_valid = true;
}
/**
 * Attempts to move immediate constants into the immediate
 * constant slot of following instructions.
 *
 * Immediate constants are a bit tricky -- they have to be in the last
 * operand slot, you can't do abs/negate on them,
 */
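/* For example, given
 *
 *    MOV  vgrf4, 2.0F
 *    MUL  vgrf5, vgrf6, vgrf4
 *
 * the scan below rewrites the MUL to use the immediate 2.0F directly
 * (commuting the operands when the constant would otherwise land in src0),
 * leaving the MOV for dead code elimination to clean up.
 */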
bool
fs_visitor::propagate_constants()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != GRF || inst->src[0].file != IMM ||
          inst->dst.type != inst->src[0].type ||
          (c->dispatch_width == 16 &&
           (inst->force_uncompressed || inst->force_sechalf)))
         continue;

      /* Don't bother with cases where we should have had the
       * operation on the constant folded in GLSL already.
       */
      if (inst->saturate)
         continue;

      /* Found a move of a constant to a GRF.  Find anything else using the GRF
       * before it's written, and replace it with the constant if we can.
       */
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         for (int i = 2; i >= 0; i--) {
            if (scan_inst->src[i].file != GRF ||
                scan_inst->src[i].reg != inst->dst.reg ||
                scan_inst->src[i].reg_offset != inst->dst.reg_offset)
               continue;

            /* Don't bother with cases where we should have had the
             * operation on the constant folded in GLSL already.
             */
            if (scan_inst->src[i].negate || scan_inst->src[i].abs)
               continue;

            switch (scan_inst->opcode) {
            case BRW_OPCODE_MOV:
               scan_inst->src[i] = inst->src[0];
               progress = true;
               break;

            case BRW_OPCODE_MUL:
            case BRW_OPCODE_ADD:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  /* Fit this constant in by commuting the operands */
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];
                  progress = true;
               }
               break;

            case BRW_OPCODE_CMP:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  uint32_t new_cmod;

                  new_cmod = brw_swap_cmod(scan_inst->conditional_mod);
                  if (new_cmod != ~0u) {
                     /* Fit this constant in by swapping the operands and
                      * flipping the test.
                      */
                     scan_inst->src[0] = scan_inst->src[1];
                     scan_inst->src[1] = inst->src[0];
                     scan_inst->conditional_mod = new_cmod;
                     progress = true;
                  }
               }
               break;

            case BRW_OPCODE_SEL:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  /* Fit this constant in by swapping the operands and
                   * flipping the predicate.
                   */
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];
                  scan_inst->predicate_inverse = !scan_inst->predicate_inverse;
                  progress = true;
               }
               break;
            }
         }

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->dst.reg &&
             (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
              scan_inst->is_tex())) {
            break;
         }
      }
   }

   if (progress)
      this->live_intervals_valid = false;

   return progress;
}
/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something deffed but not used won't be considered to
 * interfere with other regs.
 */
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
bool
fs_visitor::register_coalesce()
{
   bool progress = false;
   int if_depth = 0;
   int loop_depth = 0;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      /* Make sure that we dominate the instructions we're going to
       * scan for interfering with our coalescing, or we won't have
       * scanned enough to see if anything interferes with our
       * coalescing.  We don't dominate the following instructions if
       * we're in a loop or an if block.
       */
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      case BRW_OPCODE_IF:
         if_depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if_depth--;
         break;
      }
      if (loop_depth || if_depth)
         continue;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->saturate ||
          inst->dst.file != GRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type)
         continue;

      bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;

      /* Found a move of a GRF to a GRF.  Let's see if we can coalesce
       * them: check for no writes to either one until the exit of the
       * program.
       */
      bool interfered = false;
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->dst.file == GRF) {
            if (scan_inst->dst.reg == inst->dst.reg &&
                (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
            if (scan_inst->dst.reg == inst->src[0].reg &&
                (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
         }

         /* The gen6 MATH instruction can't handle source modifiers, so avoid
          * coalescing those for now.  We should do something more specific.
          */
         if (intel->gen == 6 && scan_inst->is_math() && has_source_modifiers) {
            interfered = true;
            break;
         }
      }
      if (interfered)
         continue;

      /* Rewrite the later usage to point at the source of the move to
       * be removed.
       */
      for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
           scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->dst.reg &&
                scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
               scan_inst->src[i].reg = inst->src[0].reg;
               scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
               scan_inst->src[i].abs |= inst->src[0].abs;
               scan_inst->src[i].negate ^= inst->src[0].negate;
               scan_inst->src[i].smear = inst->src[0].smear;
            }
         }
      }

      inst->remove();
      progress = true;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
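/* For example, a plain MOV vgrf8, vgrf7 where neither register is written
 * again lets every later read of vgrf8 be rewritten to read vgrf7 (folding
 * in any abs/negate/smear from the MOV), after which the MOV itself is
 * removed.
 */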
bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
         continue;

      /* Work out which hardware MRF registers are written by this
       * instruction.
       */
      int mrf_low = inst->dst.hw_reg & ~BRW_MRF_COMPR4;
      int mrf_high;
      if (inst->dst.hw_reg & BRW_MRF_COMPR4) {
         mrf_high = mrf_low + 4;
      } else if (c->dispatch_width == 16 &&
                 (!inst->force_uncompressed && !inst->force_sechalf)) {
         mrf_high = mrf_low + 1;
      } else {
         mrf_high = mrf_low;
      }

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      fs_inst *scan_inst;
      for (scan_inst = (fs_inst *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (fs_inst *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            if (scan_inst->is_tex()) {
               /* texturing writes several continuous regs, so we can't
                * compute-to-mrf that.
                */
               break;
            }

            /* If it's predicated, it (probably) didn't populate all
             * the channels.  We might be able to rewrite everything
             * that writes that reg, but it would require smarter
             * tracking to delay the rewriting until complete success.
             */
            if (scan_inst->predicated)
               break;

            /* If it's half of register setup and not the same half as
             * our MOV we're trying to remove, bail for now.
             */
            if (scan_inst->force_uncompressed != inst->force_uncompressed ||
                scan_inst->force_sechalf != inst->force_sechalf) {
               break;
            }

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value. */
               scan_inst->dst.file = MRF;
               scan_inst->dst.hw_reg = inst->dst.hw_reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove();
               progress = true;
            }
            break;
         }

         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs are shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF) {
            /* If somebody else writes our MRF here, we can't
             * compute-to-MRF before that.
             */
            int scan_mrf_low = scan_inst->dst.hw_reg & ~BRW_MRF_COMPR4;
            int scan_mrf_high;

            if (scan_inst->dst.hw_reg & BRW_MRF_COMPR4) {
               scan_mrf_high = scan_mrf_low + 4;
            } else if (c->dispatch_width == 16 &&
                       (!scan_inst->force_uncompressed &&
                        !scan_inst->force_sechalf)) {
               scan_mrf_high = scan_mrf_low + 1;
            } else {
               scan_mrf_high = scan_mrf_low;
            }

            if (mrf_low == scan_mrf_low ||
                mrf_low == scan_mrf_high ||
                mrf_high == scan_mrf_low ||
                mrf_high == scan_mrf_high) {
               break;
            }
         }

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (mrf_low >= scan_inst->base_mrf &&
                mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
            if (mrf_high >= scan_inst->base_mrf &&
                mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }
   }

   return progress;
}
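/* For example, an ADD vgrf9, a, b immediately followed by MOV m4, vgrf9,
 * where vgrf9 has no later readers, is rewritten so the ADD targets m4
 * directly and the MOV disappears.
 */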
/**
 * Walks through basic blocks, looking for repeated MRF writes and
 * removing the later ones.
 */
bool
fs_visitor::remove_duplicate_mrf_writes()
{
   fs_inst *last_mrf_move[16];
   bool progress = false;

   /* Need to update the MRF tracking for compressed instructions. */
   if (c->dispatch_width == 16)
      return false;

   memset(last_mrf_move, 0, sizeof(last_mrf_move));

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
      case BRW_OPCODE_WHILE:
      case BRW_OPCODE_IF:
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_ENDIF:
         memset(last_mrf_move, 0, sizeof(last_mrf_move));
         continue;
      default:
         break;
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF) {
         fs_inst *prev_inst = last_mrf_move[inst->dst.hw_reg];
         if (prev_inst && inst->equals(prev_inst)) {
            inst->remove();
            progress = true;
            continue;
         }
      }

      /* Clear out the last-write records for MRFs that were overwritten. */
      if (inst->dst.file == MRF) {
         last_mrf_move[inst->dst.hw_reg] = NULL;
      }

      if (inst->mlen > 0) {
         /* Found a SEND instruction, which will include two or fewer
          * implied MRF writes.  We could do better here.
          */
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            last_mrf_move[inst->base_mrf + i] = NULL;
         }
      }

      /* Clear out any MRF move records whose sources got overwritten. */
      if (inst->dst.file == GRF) {
         for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
            if (last_mrf_move[i] &&
                last_mrf_move[i]->src[0].reg == inst->dst.reg) {
               last_mrf_move[i] = NULL;
            }
         }
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF &&
          inst->src[0].file == GRF &&
          !inst->predicated) {
         last_mrf_move[inst->dst.hw_reg] = inst;
      }
   }

   return progress;
}
bool
fs_visitor::virtual_grf_interferes(int a, int b)
{
   int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
   int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);

   /* We can't handle dead register writes here, without iterating
    * over the whole instruction stream to find every single dead
    * write to that register to compare to the live interval of the
    * other register.  Just assert that dead_code_eliminate() has been
    * called.
    */
   assert((this->virtual_grf_use[a] != -1 ||
           this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
          (this->virtual_grf_use[b] != -1 ||
           this->virtual_grf_def[b] == MAX_INSTRUCTION));

   /* If the register is used to store 16 values of less than float
    * size (only the case for pixel_[xy]), then we can't allocate
    * another dword-sized thing to that register that would be used in
    * the same instruction.  This is because when the GPU decodes (for
    * example):
    *
    * (declare (in ) vec4 gl_FragCoord@0x97766a0)
    * add(16)  g6<1>F  g6<8,8,1>UW  0.5F  { align1 compr };
    *
    * it's actually processed as:
    * add(8)   g6<1>F  g6<8,8,1>UW    0.5F  { align1 };
    * add(8)   g7<1>F  g6.8<8,8,1>UW  0.5F  { align1 sechalf };
    *
    * so our second half values in g6 got overwritten in the first
    * half.
    */
   if (c->dispatch_width == 16 && (this->pixel_x.reg == a ||
                                   this->pixel_x.reg == b ||
                                   this->pixel_y.reg == a ||
                                   this->pixel_y.reg == b)) {
      return start <= end;
   }

   return start < end;
}
static struct brw_reg
brw_reg_from_fs_reg(fs_reg *reg)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case GRF:
   case ARF:
   case MRF:
      if (reg->smear == -1) {
         brw_reg = brw_vec8_reg(reg->file,
                                reg->hw_reg, 0);
      } else {
         brw_reg = brw_vec1_reg(reg->file,
                                reg->hw_reg, reg->smear);
      }
      brw_reg = retype(brw_reg, reg->type);
      if (reg->sechalf)
         brw_reg = sechalf(brw_reg);
      break;
   case IMM:
      switch (reg->type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(reg->imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(reg->imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(reg->imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;
   case FIXED_HW_REG:
      brw_reg = reg->fixed_hw_reg;
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case UNIFORM:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   if (reg->abs)
      brw_reg = brw_abs(brw_reg);
   if (reg->negate)
      brw_reg = negate(brw_reg);

   return brw_reg;
}
void
fs_visitor::generate_code()
{
   int last_native_inst = p->nr_insn;
   const char *last_annotation_string = NULL;
   ir_instruction *last_annotation_ir = NULL;

   int if_stack_array_size = 16;
   int loop_stack_array_size = 16;
   int if_stack_depth = 0, loop_stack_depth = 0;
   brw_instruction **if_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, if_stack_array_size);
   brw_instruction **loop_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
   int *if_depth_in_loop =
      rzalloc_array(this->mem_ctx, int, loop_stack_array_size);

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("Native code for fragment shader %d (%d-wide dispatch):\n",
             ctx->Shader.CurrentFragmentProgram->Name, c->dispatch_width);
   }
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();
      struct brw_reg src[3], dst;

      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf("   ");
               last_annotation_ir->print();
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = brw_reg_from_fs_reg(&inst->src[i]);
      }
      dst = brw_reg_from_fs_reg(&inst->dst);

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicated);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      if (inst->force_uncompressed || c->dispatch_width == 8) {
         brw_set_compression_control(p, BRW_COMPRESSION_NONE);
      } else if (inst->force_sechalf) {
         brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
      } else {
         brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
      }
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            assert(intel->gen >= 6);
            if_stack[if_stack_depth] = gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
         }
         if_depth_in_loop[loop_stack_depth]++;
         if_stack_depth++;
         if (if_stack_array_size <= if_stack_depth) {
            if_stack_array_size *= 2;
            if_stack = reralloc(this->mem_ctx, if_stack, brw_instruction *,
                                if_stack_array_size);
         }
         break;

      case BRW_OPCODE_ELSE:
         if_stack[if_stack_depth - 1] =
            brw_ELSE(p, if_stack[if_stack_depth - 1]);
         break;
      case BRW_OPCODE_ENDIF:
         if_stack_depth--;
         brw_ENDIF(p, if_stack[if_stack_depth]);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if (loop_stack_array_size <= loop_stack_depth) {
            loop_stack_array_size *= 2;
            loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
                                  loop_stack_array_size);
            if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
                                        loop_stack_array_size);
         }
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
         else
            brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
         break;
      case FS_OPCODE_RCP:
      case FS_OPCODE_RSQ:
      case FS_OPCODE_SQRT:
      case FS_OPCODE_EXP2:
      case FS_OPCODE_LOG2:
      case FS_OPCODE_POW:
      case FS_OPCODE_SIN:
      case FS_OPCODE_COS:
         generate_math(inst, dst, src);
         break;
      case FS_OPCODE_PIXEL_X:
         generate_pixel_xy(dst, true);
         break;
      case FS_OPCODE_PIXEL_Y:
         generate_pixel_xy(dst, false);
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case FS_OPCODE_TXD:
      case FS_OPCODE_TXL:
         generate_tex(inst, dst, src[0]);
         break;
      case FS_OPCODE_DISCARD_NOT:
         generate_discard_not(inst, dst);
         break;
      case FS_OPCODE_DISCARD_AND:
         generate_discard_and(inst, src[0]);
         break;
      case FS_OPCODE_DDX:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;

      case FS_OPCODE_SPILL:
         generate_spill(inst, src[0]);
         break;

      case FS_OPCODE_UNSPILL:
         generate_unspill(inst, dst);
         break;

      case FS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(inst, dst);
         break;

      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
      default:
         if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
            _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
                          brw_opcodes[inst->opcode].name);
         } else {
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         fail("unsupported opcode in FS\n");
      }
      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("\n");
   }

   ralloc_free(if_stack);
   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);

   /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (0) {
      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = 0; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }
   }
}
bool
fs_visitor::run()
{
   uint32_t prog_offset_16 = 0;
   uint32_t orig_nr_params = c->prog_data.nr_params;

   brw_wm_payload_setup(brw, c);

   if (c->dispatch_width == 16) {
      /* align to 64 byte boundary. */
      while ((c->func.nr_insn * sizeof(struct brw_instruction)) % 64) {
         brw_NOP(p);
      }

      /* Save off the start of this 16-wide program in case we succeed. */
      prog_offset_16 = c->func.nr_insn * sizeof(struct brw_instruction);

      brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
   }

   calculate_urb_setup();
   if (intel->gen < 6)
      emit_interpolation_setup_gen4();
   else
      emit_interpolation_setup_gen6();

   /* Generate FS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   foreach_iter(exec_list_iterator, iter, *shader->ir) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      base_ir = ir;
      ir->accept(this);
   }

   emit_fb_writes();

   split_virtual_grfs();

   setup_paramvalues_refs();
   setup_pull_constants();

   bool progress;
   do {
      progress = false;

      progress = remove_duplicate_mrf_writes() || progress;
      progress = propagate_constants() || progress;
      progress = register_coalesce() || progress;
      progress = compute_to_mrf() || progress;
      progress = dead_code_eliminate() || progress;
   } while (progress);

   schedule_instructions();

   assign_curb_setup();
   assign_urb_setup();

   if (0) {
      /* Debug of register spilling: Go spill everything. */
      int virtual_grf_count = virtual_grf_next;
      for (int i = 1; i < virtual_grf_count; i++) {
         spill_reg(i);
      }
   }

   if (0)
      assign_regs_trivial();
   else {
      while (!assign_regs()) {
         if (failed)
            break;
      }
   }

   assert(force_uncompressed_stack == 0);
   assert(force_sechalf_stack == 0);

   if (failed)
      return false;

   generate_code();

   if (c->dispatch_width == 8) {
      c->prog_data.total_grf = grf_used;
   } else {
      c->prog_data.total_grf_16 = grf_used;
      c->prog_data.prog_offset_16 = prog_offset_16;

      /* Make sure we didn't try to sneak in an extra uniform */
      assert(orig_nr_params == c->prog_data.nr_params);
   }

   return !failed;
}
bool
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;

   if (!prog)
      return false;

   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (!shader)
      return false;

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   c->dispatch_width = 8;

   fs_visitor v(c, shader);
   if (!v.run()) {
      /* FINISHME: Cleanly fail, test at link time, etc. */
      assert(!"not reached");
      return false;
   }

   if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0) {
      c->dispatch_width = 16;
      fs_visitor v2(c, shader);
      v2.import_uniforms(v.variable_ht);
      v2.run();
   }

   c->prog_data.dispatch_width = 8;