/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"
#define MAX_INSTRUCTION (1 << 30)

static struct brw_reg brw_reg_from_fs_reg(class fs_reg *reg);
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;

   prog = rzalloc(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }

   return &prog->base;
}
GLboolean
brw_compile_shader(struct gl_context *ctx, struct gl_shader *shader)
{
   if (!_mesa_ir_compile_shader(ctx, shader))
      return GL_FALSE;

   return GL_TRUE;
}
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;

   struct brw_shader *shader =
      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (shader) {
      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      do_mat_op_to_vec(shader->ir);
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      brw_do_cubemap_normalize(shader->ir);

      do {
         progress = false;

         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, 32) || progress;

         progress = lower_noise(shader->ir) || progress;
         progress =
            lower_variable_index_to_cond_assign(shader->ir,
                                                GL_TRUE, /* input */
                                                GL_TRUE, /* output */
                                                GL_TRUE, /* temp */
                                                GL_TRUE /* uniform */
                                                ) || progress;
         progress = lower_quadop_vector(shader->ir, false) || progress;
      } while (progress);

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);
   }

   if (!_mesa_ir_link_shader(ctx, prog))
      return GL_FALSE;

   return GL_TRUE;
}
static int
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   default:
      assert(!"not reached");
      return 0;
   }
}
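/* So, for example: a float costs 1 slot, a vec4 costs 4, a mat3 costs 9
 * (components() counts every column), and float[10] costs 10.
 */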
/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
int
fs_visitor::implied_mrf_writes(fs_inst *inst)
{
   if (inst->mlen == 0)
      return 0;

   switch (inst->opcode) {
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNSPILL:
      return 1;
   case FS_OPCODE_SPILL:
      return 2;
   default:
      assert(!"not reached");
      return inst->mlen;
   }
}
int
fs_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_next) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);

      /* This slot is always unused. */
      virtual_grf_sizes[0] = 0;
   }
   virtual_grf_sizes[virtual_grf_next] = size;
   return virtual_grf_next++;
}
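/* The size array grows by doubling, and slot 0 is deliberately burned so
 * that no virtual GRF is ever numbered 0 -- visit(ir_dereference_array)
 * below relies on reg != 0 when sanity-checking GRF results.
 */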
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = BRW_REGISTER_TYPE_F;
}
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int hw_reg, uint32_t type)
{
   init();
   this->file = file;
   this->hw_reg = hw_reg;
   this->type = type;
}
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}
/** Automatic reg constructor. */
fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));
   this->reg_offset = 0;
   this->type = brw_type_for_base_type(type);
}
fs_reg *
fs_visitor::variable_storage(ir_variable *var)
{
   return (fs_reg *)hash_table_find(this->variable_ht, var);
}
/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         unsigned int param = c->prog_data.nr_params++;

         assert(param < ARRAY_SIZE(c->prog_data.param));

         switch (type->base_type) {
         case GLSL_TYPE_FLOAT:
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         case GLSL_TYPE_UINT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
            break;
         case GLSL_TYPE_INT:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
            break;
         case GLSL_TYPE_BOOL:
            c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
            break;
         default:
            assert(!"not reached");
            c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
            break;
         }
         this->param_index[param] = loc;
         this->param_offset[param] = i;
      }
      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}
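/* A sketch of the recursion: a mat2 uniform is handled as two vec2 columns.
 * Each column takes the vector path, appending vector_elements entries to
 * c->prog_data and returning 1, so the matrix consumes 2 uniform locations
 * while producing 4 float params.
 */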
/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const struct gl_builtin_uniform_desc *statevar = NULL;

   for (unsigned int i = 0; _mesa_builtin_uniform_desc[i].name; i++) {
      statevar = &_mesa_builtin_uniform_desc[i];
      if (strcmp(ir->name, _mesa_builtin_uniform_desc[i].name) == 0)
         break;
   }

   if (!statevar->name) {
      printf("Failed to find builtin uniform `%s'\n", ir->name);
      return;
   }

   int array_count;
   if (ir->type->is_array()) {
      array_count = ir->type->length;
   } else {
      array_count = 1;
   }

   for (int a = 0; a < array_count; a++) {
      for (unsigned int i = 0; i < statevar->num_elements; i++) {
         struct gl_builtin_uniform_element *element = &statevar->elements[i];
         int tokens[STATE_LENGTH];

         memcpy(tokens, element->tokens, sizeof(element->tokens));
         if (ir->type->is_array()) {
            tokens[1] = a;
         }

         /* This state reference has already been setup by ir_to_mesa,
          * but we'll get the same index back here.
          */
         int index = _mesa_add_state_reference(this->fp->Base.Parameters,
                                               (gl_state_index *)tokens);

         /* Add each of the unique swizzles of the element as a
          * parameter.  This'll end up matching the expected layout of
          * the array/matrix/structure we're trying to fill in.
          */
         int last_swiz = -1;
         for (unsigned int i = 0; i < 4; i++) {
            int swiz = GET_SWZ(element->swizzle, i);
            if (swiz == last_swiz)
               break;
            last_swiz = swiz;

            c->prog_data.param_convert[c->prog_data.nr_params] =
               PARAM_NO_CONVERT;
            this->param_index[c->prog_data.nr_params] = index;
            this->param_offset[c->prog_data.nr_params] = swiz;
            c->prog_data.nr_params++;
         }
      }
   }
}
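/* The early break on a repeated swizzle component is what sizes each
 * element correctly: a scalar state value (swizzle xxxx) adds one param,
 * while a vec4 state value (swizzle xyzw) adds four.
 */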
fs_reg *
fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   fs_reg wpos = *reg;
   fs_reg neg_y = this->pixel_y;
   neg_y.negate = true;
   bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;

   /* gl_FragCoord.x */
   if (ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_x));
   } else {
      emit(fs_inst(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.y */
   if (!flip && ir->pixel_center_integer) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos, this->pixel_y));
   } else {
      fs_reg pixel_y = this->pixel_y;
      float offset = (ir->pixel_center_integer ? 0.0 : 0.5);

      if (flip) {
         pixel_y.negate = true;
         offset += c->key.drawable_height - 1.0;
      }

      emit(fs_inst(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.z */
   if (intel->gen >= 6) {
      emit(fs_inst(BRW_OPCODE_MOV, wpos,
                   fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
   } else {
      emit(fs_inst(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
                   interp_reg(FRAG_ATTRIB_WPOS, 2)));
   }
   wpos.reg_offset++;

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   emit(fs_inst(BRW_OPCODE_MOV, wpos, this->wpos_w));

   return reg;
}
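/* Note that flip is the XOR of the shader's declared origin and
 * render_to_fbo, so the Y negate plus (drawable_height - 1) bias is only
 * applied when the window-system and shader conventions actually disagree.
 */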
fs_reg *
fs_visitor::emit_general_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
   /* Interpolation is always in floating point regs. */
   reg->type = BRW_REGISTER_TYPE_F;
   fs_reg attr = *reg;

   unsigned int array_elements;
   const glsl_type *type;

   if (ir->type->is_array()) {
      array_elements = ir->type->length;
      if (array_elements == 0) {
         this->fail = true;
      }
      type = ir->type->fields.array;
   } else {
      array_elements = 1;
      type = ir->type;
   }

   int location = ir->location;
   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr.reg_offset += type->vector_elements;
            location++;
            continue;
         }

         if (c->key.flat_shade && (location == FRAG_ATTRIB_COL0 ||
                                   location == FRAG_ATTRIB_COL1)) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int c = 0; c < type->vector_elements; c++) {
               struct brw_reg interp = interp_reg(location, c);
               interp = suboffset(interp, 3);
               emit(fs_inst(FS_OPCODE_CINTERP, attr, fs_reg(interp)));
               attr.reg_offset++;
            }
         } else {
            /* Perspective interpolation case. */
            for (unsigned int c = 0; c < type->vector_elements; c++) {
               struct brw_reg interp = interp_reg(location, c);
               emit(fs_inst(FS_OPCODE_LINTERP,
                            attr,
                            this->delta_x,
                            this->delta_y,
                            fs_reg(interp)));
               attr.reg_offset++;
            }

            if (intel->gen < 6) {
               attr.reg_offset -= type->vector_elements;
               for (unsigned int c = 0; c < type->vector_elements; c++) {
                  emit(fs_inst(BRW_OPCODE_MUL,
                               attr,
                               attr,
                               this->pixel_w));
                  attr.reg_offset++;
               }
            }
         }
         location++;
      }
   }

   return reg;
}
fs_reg *
fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
{
   fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);

   /* The frontfacing comes in as a bit in the thread payload. */
   if (intel->gen >= 6) {
      emit(fs_inst(BRW_OPCODE_ASR,
                   *reg,
                   fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
                   fs_reg(15)));
      emit(fs_inst(BRW_OPCODE_NOT,
                   *reg,
                   *reg));
      emit(fs_inst(BRW_OPCODE_AND,
                   *reg,
                   *reg,
                   fs_reg(1)));
   } else {
      struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
      /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
       * us front face
       */
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP,
                                   *reg,
                                   fs_reg(r1_6ud),
                                   fs_reg(1u << 31)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      emit(fs_inst(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u)));
   }

   return reg;
}
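/* Both paths reduce to "extract the hardware's back-face bit and invert
 * it": gen6 shifts and masks a bit out of g0.0, while gen4/5 compares
 * g1.6 (bit 31 set means back-facing) against 1 << 31 with a less-than
 * conditional mod and masks the result down to 0 or 1.
 */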
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src)
{
   switch (opcode) {
   case FS_OPCODE_RCP:
   case FS_OPCODE_RSQ:
   case FS_OPCODE_SQRT:
   case FS_OPCODE_EXP2:
   case FS_OPCODE_LOG2:
   case FS_OPCODE_SIN:
   case FS_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return NULL;
   }

   /* Can't do hstride == 0 args to gen6 math, so expand it out.  We
    * might be able to do better by doing execsize = 1 math and then
    * expanding that result out, but we would need to be careful with
    * masking.
    *
    * The hardware ignores source modifiers (negate and abs) on math
    * instructions, so we also move to a temp to set those up.
    */
   if (intel->gen >= 6 && (src.file == UNIFORM ||
                           src.abs ||
                           src.negate)) {
      fs_reg expanded = fs_reg(this, glsl_type::float_type);
      emit(fs_inst(BRW_OPCODE_MOV, expanded, src));
      src = expanded;
   }

   fs_inst *inst = emit(fs_inst(opcode, dst, src));

   if (intel->gen < 6) {
      inst->base_mrf = 2;
      inst->mlen = 1;
   }

   return inst;
}
fs_inst *
fs_visitor::emit_math(fs_opcodes opcode, fs_reg dst, fs_reg src0, fs_reg src1)
{
   fs_inst *inst;
   int base_mrf = 2;

   assert(opcode == FS_OPCODE_POW);

   if (intel->gen >= 6) {
      /* Can't do hstride == 0 args to gen6 math, so expand it out.
       *
       * The hardware ignores source modifiers (negate and abs) on math
       * instructions, so we also move to a temp to set those up.
       */
      if (src0.file == UNIFORM || src0.abs || src0.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src0));
         src0 = expanded;
      }

      if (src1.file == UNIFORM || src1.abs || src1.negate) {
         fs_reg expanded = fs_reg(this, glsl_type::float_type);
         emit(fs_inst(BRW_OPCODE_MOV, expanded, src1));
         src1 = expanded;
      }

      inst = emit(fs_inst(opcode, dst, src0, src1));
   } else {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1));
      inst = emit(fs_inst(opcode, dst, src0, reg_null_f));

      inst->base_mrf = base_mrf;
      inst->mlen = 2;
   }

   return inst;
}
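/* Pre-gen6, math is a send to the shared math unit, so src1 has to be
 * staged in MRF base_mrf + 1 and the instruction carries base_mrf/mlen;
 * gen6+ math runs on the EU and takes both sources directly, which is why
 * UNIFORM/abs/negate operands get expanded to temporaries above -- gen6
 * math accepts neither hstride == 0 arguments nor source modifiers.
 */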
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (strcmp(ir->name, "gl_FragColor") == 0) {
      this->frag_color = ir;
   } else if (strcmp(ir->name, "gl_FragData") == 0) {
      this->frag_data = ir;
   } else if (strcmp(ir->name, "gl_FragDepth") == 0) {
      this->frag_depth = ir;
   }

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   }

   if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}
void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}
void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}
void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM ||
             (this->result.file == GRF &&
              this->result.reg != 0));
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}
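/* Only constant indexing should survive to this point: brw_link_shader
 * runs lower_variable_index_to_cond_assign() over inputs, outputs,
 * temporaries and uniforms, so a variable index here indicates a missing
 * lowering rather than a supportable case.
 */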
/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   sat_val->accept(this);
   fs_reg src = this->result;

   this->result = fs_reg(this, ir->type);
   fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, src));
   inst->saturate = true;

   return true;
}
static uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}
void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)));

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)));
      inst->predicated = true;

      inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(fs_inst(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)));
      inst->predicated = true;
      break;
   case ir_unop_rcp:
      emit_math(FS_OPCODE_RCP, this->result, op[0]);
      break;
   case ir_unop_exp2:
      emit_math(FS_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(FS_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(FS_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(FS_OPCODE_COS, this->result, op[0]);
      break;
   case ir_unop_dFdx:
      emit(fs_inst(FS_OPCODE_DDX, this->result, op[0]));
      break;
   case ir_unop_dFdy:
      emit(fs_inst(FS_OPCODE_DDY, this->result, op[0]));
      break;
   case ir_binop_add:
      emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;
   case ir_binop_mul:
      emit(fs_inst(BRW_OPCODE_MUL, this->result, op[0], op[1]));
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      break;
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;
   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      inst = emit(fs_inst(BRW_OPCODE_CMP, temp, op[0], op[1]));
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      emit(fs_inst(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)));
      break;
   case ir_binop_logic_xor:
      emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;
   case ir_binop_logic_or:
      emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;
   case ir_binop_logic_and:
      emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;
   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;
   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;
   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   case ir_unop_sqrt:
      emit_math(FS_OPCODE_SQRT, this->result, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(FS_OPCODE_RSQ, this->result, op[0]);
      break;
   case ir_unop_i2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(fs_inst(BRW_OPCODE_MOV, this->result, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      inst = emit(fs_inst(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(fs_inst(BRW_OPCODE_AND, this->result,
                          this->result, fs_reg(1)));
      break;
   case ir_unop_trunc:
      emit(fs_inst(BRW_OPCODE_RNDZ, this->result, op[0]));
      break;
   case ir_unop_ceil:
      /* ceil(x) computed as -floor(-x). */
      op[0].negate = !op[0].negate;
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(fs_inst(BRW_OPCODE_RNDD, this->result, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(fs_inst(BRW_OPCODE_FRC, this->result, op[0]));
      break;
   case ir_unop_round_even:
      emit(fs_inst(BRW_OPCODE_RNDE, this->result, op[0]));
      break;
   case ir_binop_min:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_L;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_max:
      inst = emit(fs_inst(BRW_OPCODE_CMP, this->result, op[0], op[1]));
      inst->conditional_mod = BRW_CONDITIONAL_G;

      inst = emit(fs_inst(BRW_OPCODE_SEL, this->result, op[0], op[1]));
      inst->predicated = true;
      break;
   case ir_binop_pow:
      emit_math(FS_OPCODE_POW, this->result, op[0], op[1]);
      break;
   case ir_unop_bit_not:
      inst = emit(fs_inst(BRW_OPCODE_NOT, this->result, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
      break;
   case ir_unop_u2f:
   case ir_binop_lshift:
   case ir_binop_rshift:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
}
void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
         inst->predicated = predicated;

         l.reg_offset++;
         r.reg_offset++;
      }
      break;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}
void
fs_visitor::visit(ir_assignment *ir)
{
   struct fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   ir->rhs->accept(this);
   r = this->result;

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(fs_inst(BRW_OPCODE_MOV, l, r));
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      fs_reg(0.0f)));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                      coordinate));
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (ir->op == ir_txd) {
      assert(!"TXD isn't supported on gen4 yet.");
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      assert(ir->op == ir_txb || ir->op == ir_txl);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
                      coordinate));
         coordinate.reg_offset++;
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen),
                      this->result));
         mlen++;
      }

      /* The unused upper half. */
      mlen++;

      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk. We'll need to move
       * this weirdness around to the expected layout.
       */
      simd16 = true;
      orig_dst = dst;
      dst = fs_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type,
                                                       2));
      dst.type = BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   case ir_txd:
      inst = emit(fs_inst(FS_OPCODE_TXD, dst));
      break;
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(fs_inst(BRW_OPCODE_MOV, orig_dst, dst));
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}
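/* In the SIMD16 workaround the sampler returns two interleaved vec4s with
 * the odd-numbered registers unused, so the copy loop above advances the
 * source by two registers per channel to compact the result back into the
 * caller's SIMD8 destination.
 */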
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate)
{
   /* gen5's SIMD8 sampler has slots for u, v, r, array index, then
    * optional parameters like shadow comparitor or LOD bias.  If
    * optional parameters aren't present, those base slots are
    * optional and don't need to be included in the message.
    *
    * We don't fill in the unnecessary slots regardless, which may
    * look surprising in the disassembly.
    */
   int mlen = 1; /* g0 header always present. */
   int base_mrf = 1;

   for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i),
                   coordinate));
      coordinate.reg_offset++;
   }
   mlen += ir->coordinate->type->vector_elements;

   if (ir->shadow_comparitor) {
      mlen = MAX2(mlen, 5);

      ir->shadow_comparitor->accept(this);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(fs_inst(FS_OPCODE_TEX, dst));
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXB, dst));
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, 5);
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result));
      mlen++;

      inst = emit(fs_inst(FS_OPCODE_TXL, dst));
      break;
   case ir_txd:
   case ir_txf:
      assert(!"GLSL 1.30 features unsupported");
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;

   return inst;
}
void
fs_visitor::visit(ir_texture *ir)
{
   int sampler;
   fs_inst *inst = NULL;

   ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->offset != NULL) {
      ir_constant *offset = ir->offset->as_constant();
      assert(offset != NULL);

      signed char offsets[3];
      for (unsigned i = 0; i < ir->offset->type->vector_elements; i++)
         offsets[i] = (signed char) offset->value.i[i];

      /* Combine all three offsets into a single unsigned dword:
       *
       *    bits 11:8 - U Offset (X component)
       *    bits  7:4 - V Offset (Y component)
       *    bits  3:0 - R Offset (Z component)
       */
      unsigned offset_bits = 0;
      for (unsigned i = 0; i < ir->offset->type->vector_elements; i++) {
         const unsigned shift = 4 * (2 - i);
         offset_bits |= (offsets[i] << shift) & (0xF << shift);
      }

      /* Explicitly set up the message header by copying g0 to msg reg m1. */
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
                   fs_reg(GRF, 0, BRW_REGISTER_TYPE_UD)));

      /* Then set the offset bits in DWord 2 of the message header. */
      emit(fs_inst(BRW_OPCODE_MOV,
                   fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
                                 BRW_REGISTER_TYPE_UD)),
                   fs_reg(brw_imm_uw(offset_bits))));
   }

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   sampler = _mesa_get_sampler_uniform_value(ir->sampler,
                                             ctx->Shader.CurrentFragmentProgram,
                                             &brw->fragment_program->Base);
   sampler = c->fp->program.Base.SamplerUnits[sampler];

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      c->prog_data.param_convert[c->prog_data.nr_params] =
         PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
         PARAM_NO_CONVERT;

      fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);

      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 0;
      c->prog_data.nr_params++;
      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 1;
      c->prog_data.nr_params++;

      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_x));
      dst.reg_offset++;
      src.reg_offset++;
      emit(fs_inst(BRW_OPCODE_MUL, dst, src, scale_y));
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::vec4_type);

   if (intel->gen < 5) {
      inst = emit_texture_gen4(ir, dst, coordinate);
   } else {
      inst = emit_texture_gen5(ir, dst, coordinate);
   }

   /* If there's an offset, we already set up m1.  To avoid the implied move,
    * use the null register.  Otherwise, we want an implied move from g0.
    */
   if (ir->offset != NULL)
      inst->src[0] = fs_reg(brw_null_reg());
   else
      inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));

   inst->sampler = sampler;

   this->result = dst;

   if (ir->shadow_comparitor)
      inst->shadow_compare = true;

   if (ir->type == glsl_type::float_type) {
      /* Ignore DEPTH_TEXTURE_MODE swizzling. */
      assert(ir->sampler->type->sampler_shadow);
   } else if (c->key.tex_swizzles[inst->sampler] != SWIZZLE_NOOP) {
      fs_reg swizzle_dst = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
         fs_reg l = swizzle_dst;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(0.0f)));
         } else if (swiz == SWIZZLE_ONE) {
            emit(fs_inst(BRW_OPCODE_MOV, l, fs_reg(1.0f)));
         } else {
            fs_reg r = dst;
            r.reg_offset += GET_SWZ(c->key.tex_swizzles[inst->sampler], i);
            emit(fs_inst(BRW_OPCODE_MOV, l, r));
         }
      }
      this->result = swizzle_dst;
   }
}
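/* The swizzle fixup resolves c->key.tex_swizzles entirely at compile time:
 * each destination channel becomes either a constant 0.0/1.0 MOV or a MOV
 * from the selected channel of the raw sample, and this->result is
 * redirected to the swizzled copy.
 */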
void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(fs_inst(BRW_OPCODE_MOV, result, channel));
      result.reg_offset++;
   }
}
void
fs_visitor::visit(ir_discard *ir)
{
   fs_reg temp = fs_reg(this, glsl_type::uint_type);

   assert(ir->condition == NULL); /* FINISHME */

   emit(fs_inst(FS_OPCODE_DISCARD_NOT, temp, reg_null_d));
   emit(fs_inst(FS_OPCODE_DISCARD_AND, reg_null_d, temp));
   kill_emitted = true;
}
void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
         ir_instruction *const field = (ir_instruction *) node;
         const unsigned size = type_size(field->type);

         field->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, src_reg));
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])));
            break;
         case GLSL_TYPE_UINT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])));
            break;
         case GLSL_TYPE_INT:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])));
            break;
         case GLSL_TYPE_BOOL:
            emit(fs_inst(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])));
            break;
         default:
            assert(!"Non-float/uint/int/bool constant");
            break;
         }
         dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}
void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(fs_inst(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(fs_inst(BRW_OPCODE_OR, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d,
                                op[0], fs_reg(0.0f)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_f, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)));
         } else {
            inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, op[0]));
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]));
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         break;

      default:
         assert(!"not reached");
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_AND, reg_null_d,
                                   this->result, fs_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(fs_inst(BRW_OPCODE_MOV, reg_null_d, this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}
/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
fs_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;
      fs_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(fs_inst(BRW_OPCODE_IF, temp, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;

      case ir_binop_logic_xor:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_or:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(fs_inst(BRW_OPCODE_OR, temp, op[0], op[1]));
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_and:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(fs_inst(BRW_OPCODE_AND, temp, op[0], op[1]));
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_f2b:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_i2b:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         return;

      default:
         assert(!"not reached");
         inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;
      }
   }

   ir->condition->accept(this);

   fs_inst *inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;
}
void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen >= 6) {
      emit_if_gen6(ir);
   } else {
      emit_bool_to_cond_code(ir->condition);

      inst = emit(fs_inst(BRW_OPCODE_IF));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(fs_inst(BRW_OPCODE_ELSE));

      foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(fs_inst(BRW_OPCODE_ENDIF));
}
void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(fs_inst(BRW_OPCODE_MOV, counter, this->result));
      }
   }

   emit(fs_inst(BRW_OPCODE_DO));

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(fs_inst(BRW_OPCODE_CMP, reg_null_cmp,
                                   counter, this->result));
      inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);

      inst = emit(fs_inst(BRW_OPCODE_BREAK));
      inst->predicated = true;
   }

   foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)iter.get();

      this->base_ir = ir;
      ir->accept(this);
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(fs_inst(BRW_OPCODE_ADD, counter, counter, this->result));
   }

   emit(fs_inst(BRW_OPCODE_WHILE));
}
void
fs_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(fs_inst(BRW_OPCODE_BREAK));
      break;
   case ir_loop_jump::jump_continue:
      emit(fs_inst(BRW_OPCODE_CONTINUE));
      break;
   }
}
void
fs_visitor::visit(ir_call *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_return *ir)
{
   assert(!"FINISHME");
}
void
fs_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to ir_to_mesa.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_iter(exec_list_iterator, iter, sig->body) {
         ir_instruction *ir = (ir_instruction *)iter.get();
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}
void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
}
fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}
/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 2),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 3),
                fs_reg(0.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 4),
                fs_reg(1.0f)));
   emit(fs_inst(BRW_OPCODE_MOV,
                fs_reg(MRF, 5),
                fs_reg(0.0f)));

   fs_inst *write;
   write = emit(fs_inst(FS_OPCODE_FB_WRITE,
                        fs_reg(0),
                        fs_reg(0)));
   write->base_mrf = 0;
}
/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}
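/* Two attribute channels share each setup register: channels 0/1 of a slot
 * live at subregister offsets 0 and 4 of urb_setup[location] * 2, and
 * channels 2/3 in the next register -- hence the "* 2 + channel / 2" and
 * "(channel & 1) * 4" arithmetic above.
 */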
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   this->current_annotation = "compute pixel deltas from v0";
   if (brw->has_pln) {
      this->delta_x = fs_reg(this, glsl_type::vec2_type);
      this->delta_y = this->delta_x;
      this->delta_y.reg_offset++;
   } else {
      this->delta_x = fs_reg(this, glsl_type::float_type);
      this->delta_y = fs_reg(this, glsl_type::float_type);
   }
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_x,
                this->pixel_x,
                fs_reg(negate(brw_vec1_grf(1, 0)))));
   emit(fs_inst(BRW_OPCODE_ADD,
                this->delta_y,
                this->pixel_y,
                fs_reg(negate(brw_vec1_grf(1, 1)))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(FS_OPCODE_LINTERP, wpos_w, this->delta_x, this->delta_y,
                interp_reg(FRAG_ATTRIB_WPOS, 3)));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}
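/* The brw_imm_v() arguments are packed 4-bit vector immediates: 0x10101010
 * adds 0,1,0,1,... and 0x11001100 adds 0,0,1,1,... across the channels,
 * turning the per-subspan origin in g1 into per-pixel integer centers for
 * the 2x2 subspans.
 */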
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_x,
                fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
                fs_reg(brw_imm_v(0x10101010))));
   emit(fs_inst(BRW_OPCODE_ADD,
                int_pixel_y,
                fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
                fs_reg(brw_imm_v(0x11001100))));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x));
   emit(fs_inst(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y));

   this->current_annotation = "compute 1/pos.w";
   this->wpos_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(FS_OPCODE_RCP, this->pixel_w, wpos_w);

   this->delta_x = fs_reg(brw_vec8_grf(2, 0));
   this->delta_y = fs_reg(brw_vec8_grf(3, 0));

   this->current_annotation = NULL;
}
void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   GLboolean header_present = GL_TRUE;
   int nr = 0;

   if (intel->gen >= 6 &&
       !this->kill_emitted &&
       c->key.nr_color_regions == 1) {
      header_present = false;
   }

   if (header_present) {
      /* m0, m1 header */
      nr += 2;
   }

   if (c->aa_dest_stencil_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))));
   }

   /* Reserve space for color. It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4;

   if (c->source_depth_to_render_target) {
      if (c->computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++), depth));
      } else {
         /* Pass through the payload depth. */
         emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                      fs_reg(brw_vec8_grf(c->source_depth_reg, 0))));
      }
   }

   if (c->dest_depth_reg) {
      emit(fs_inst(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
                   fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))));
   }

   fs_reg color = reg_undef;
   if (this->frag_color)
      color = *(variable_storage(this->frag_color));
   else if (this->frag_data) {
      color = *(variable_storage(this->frag_data));
      color.type = BRW_REGISTER_TYPE_F;
   }

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = ralloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      if (this->frag_color || this->frag_data) {
         for (int i = 0; i < 4; i++) {
            emit(fs_inst(BRW_OPCODE_MOV,
                         fs_reg(MRF, color_mrf + i),
                         color));
            color.reg_offset++;
         }
      }

      if (this->frag_color)
         color.reg_offset -= 4;

      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->target = target;
      inst->base_mrf = 0;
      inst->mlen = nr;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      fs_inst *inst = emit(fs_inst(FS_OPCODE_FB_WRITE,
                                   reg_undef, reg_undef));
      inst->base_mrf = 0;
      inst->mlen = nr;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}
void
fs_visitor::generate_fb_write(fs_inst *inst)
{
   GLboolean eot = inst->eot;
   struct brw_reg implied_header;

   /* Header is 2 regs, g0 and g1 are the contents.  g0 will be implied
    * move, here's g1.
    */
   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_compression_control(p, BRW_COMPRESSION_NONE);

   if (inst->header_present) {
      if (intel->gen >= 6) {
         brw_MOV(p,
                 brw_message_reg(inst->base_mrf),
                 brw_vec8_grf(0, 0));

         if (inst->target > 0) {
            /* Set the render target index for choosing BLEND_STATE. */
            brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
                              BRW_REGISTER_TYPE_UD),
                    brw_imm_ud(inst->target));
         } else {
            /* Clear viewport index, render target array index. */
            brw_AND(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 0),
                              BRW_REGISTER_TYPE_UD),
                    retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
                    brw_imm_ud(0xf7ff));
         }

         implied_header = brw_null_reg();
      } else {
         implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);

         brw_MOV(p,
                 brw_message_reg(inst->base_mrf + 1),
                 brw_vec8_grf(1, 0));
      }
   } else {
      implied_header = brw_null_reg();
   }

   brw_pop_insn_state(p);

   brw_fb_WRITE(p,
                8, /* dispatch_width */
                retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
                inst->base_mrf,
                implied_header,
                inst->target,
                inst->mlen,
                0, /* response length */
                eot,
                inst->header_present);
}
void
fs_visitor::generate_linterp(fs_inst *inst,
                             struct brw_reg dst, struct brw_reg *src)
{
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = src[1];
   struct brw_reg interp = src[2];

   if (brw->has_pln &&
       delta_y.nr == delta_x.nr + 1 &&
       (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
      brw_PLN(p, dst, interp, delta_x);
   } else {
      brw_LINE(p, brw_null_reg(), interp, delta_x);
      brw_MAC(p, dst, suboffset(interp, 1), delta_y);
   }
}
void
fs_visitor::generate_math(fs_inst *inst,
                          struct brw_reg dst, struct brw_reg *src)
{
   int op;

   switch (inst->opcode) {
   case FS_OPCODE_RCP:
      op = BRW_MATH_FUNCTION_INV;
      break;
   case FS_OPCODE_RSQ:
      op = BRW_MATH_FUNCTION_RSQ;
      break;
   case FS_OPCODE_SQRT:
      op = BRW_MATH_FUNCTION_SQRT;
      break;
   case FS_OPCODE_EXP2:
      op = BRW_MATH_FUNCTION_EXP;
      break;
   case FS_OPCODE_LOG2:
      op = BRW_MATH_FUNCTION_LOG;
      break;
   case FS_OPCODE_POW:
      op = BRW_MATH_FUNCTION_POW;
      break;
   case FS_OPCODE_SIN:
      op = BRW_MATH_FUNCTION_SIN;
      break;
   case FS_OPCODE_COS:
      op = BRW_MATH_FUNCTION_COS;
      break;
   default:
      assert(!"not reached: unknown math function");
      op = 0;
      break;
   }

   if (intel->gen >= 6) {
      assert(inst->mlen == 0);

      if (inst->opcode == FS_OPCODE_POW) {
         brw_math2(p, dst, op, src[0], src[1]);
      } else {
         brw_math(p, dst,
                  op,
                  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
                  BRW_MATH_SATURATE_NONE,
                  0, src[0],
                  BRW_MATH_DATA_VECTOR,
                  BRW_MATH_PRECISION_FULL);
      }
   } else {
      assert(inst->mlen >= 1);

      brw_math(p, dst,
               op,
               inst->saturate ? BRW_MATH_SATURATE_SATURATE :
               BRW_MATH_SATURATE_NONE,
               inst->base_mrf, src[0],
               BRW_MATH_DATA_VECTOR,
               BRW_MATH_PRECISION_FULL);
   }
}
void
fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   int msg_type = -1;
   int rlen = 4;
   uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;

   if (intel->gen >= 5) {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
         }
         break;
      case FS_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE_GEN5;
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_LOD_GEN5;
         }
         break;
      case FS_OPCODE_TXD:
         assert(!"TXD isn't supported on gen5+ yet.");
         break;
      }
   } else {
      switch (inst->opcode) {
      case FS_OPCODE_TEX:
         /* Note that G45 and older determines shadow compare and dispatch width
          * from message length for most messages.
          */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
         } else {
            assert(inst->mlen <= 4);
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case FS_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case FS_OPCODE_TXD:
         assert(!"TXD isn't supported on gen4 yet.");
         break;
      }
   }
   assert(msg_type != -1);

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              SURF_INDEX_TEXTURE(inst->sampler),
              inst->sampler,
              WRITEMASK_XYZW,
              msg_type,
              rlen,
              inst->mlen,
              0, /* eot */
              1, /* header */
              simd_mode);
}
/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * and we're trying to produce:
 *
 *           DDX                    DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But for DDY, it's harder, as we want to produce the pairs swizzled
 * between each other.  We could probably do it like ddx and swizzle the right
 * order later, but bail for now and just produce
 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
 */
void
fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_2,
                                 BRW_WIDTH_2,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
void
fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
{
   struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                 BRW_REGISTER_TYPE_F,
                                 BRW_VERTICAL_STRIDE_4,
                                 BRW_WIDTH_4,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}
void
fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
{
   if (intel->gen >= 6) {
      /* Gen6 no longer has the mask reg for us to just read the
       * active channels from.  However, cmp updates just the channels
       * of the flag reg that are enabled, so we can get at the
       * channel enables that way.  In this step, make a reg of ones
       * we'll compare to.
       */
      brw_MOV(p, mask, brw_imm_ud(1));
   } else {
      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
      brw_pop_insn_state(p);
   }
}
void
fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
{
   if (intel->gen >= 6) {
      struct brw_reg f0 = brw_flag_reg();
      struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_MOV(p, f0, brw_imm_uw(0xffff)); /* inactive channels undiscarded */
      brw_pop_insn_state(p);

      brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
              BRW_CONDITIONAL_Z, mask, brw_imm_ud(0)); /* active channels fail test */
      /* Undo CMP's whacking of predication. */
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_AND(p, g1, f0, g1);
      brw_pop_insn_state(p);
   } else {
      struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

      mask = brw_uw1_reg(mask.file, mask.nr, 0);

      brw_push_insn_state(p);
      brw_set_mask_control(p, BRW_MASK_DISABLE);
      brw_AND(p, g0, mask, g0);
      brw_pop_insn_state(p);
   }
}
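/* Both generations wind up in the same place: the shader's discard mask is
 * ANDed into the pixel-enable word of the thread payload (the g1 word on
 * gen6, the g0 word on older parts), so discarded pixels drop out of the
 * eventual framebuffer write.
 */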
void
fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
{
   assert(inst->mlen != 0);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
           retype(src, BRW_REGISTER_TYPE_UD));
   brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
                                 inst->offset);
}
void
fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->mlen != 0);

   /* Clear any post destination dependencies that would be ignored by
    * the block read.  See the B-Spec for pre-gen5 send instruction.
    *
    * This could use a better solution, since texture sampling and
    * math reads could potentially run into it as well -- anywhere
    * that we have a SEND with a destination that is a register that
    * was written but not read within the last N instructions (what's
    * N?  unsure).  This is rare because of dead code elimination, but
    * not impossible.
    */
   if (intel->gen == 4 && !intel->is_g4x)
      brw_MOV(p, brw_null_reg(), dst);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
                                inst->offset);

   if (intel->gen == 4 && !intel->is_g4x) {
      /* gen4 errata: destination from a send can't be used as a
       * destination until it's been read.  Just read it so we don't
       * have to worry.
       */
      brw_MOV(p, brw_null_reg(), dst);
   }
}
void
fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->mlen != 0);

   /* Clear any post destination dependencies that would be ignored by
    * the block read.  See the B-Spec for pre-gen5 send instruction.
    *
    * This could use a better solution, since texture sampling and
    * math reads could potentially run into it as well -- anywhere
    * that we have a SEND with a destination that is a register that
    * was written but not read within the last N instructions (what's
    * N?  unsure).  This is rare because of dead code elimination, but
    * not impossible.
    */
   if (intel->gen == 4 && !intel->is_g4x)
      brw_MOV(p, brw_null_reg(), dst);

   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);

   if (intel->gen == 4 && !intel->is_g4x) {
      /* gen4 errata: destination from a send can't be used as a
       * destination until it's been read.  Just read it so we don't
       * have to worry.
       */
      brw_MOV(p, brw_null_reg(), dst);
   }
}
/**
 * To be called after the last _mesa_add_state_reference() call, to
 * set up prog_data.param[] for assign_curb_setup() and
 * setup_pull_constants().
 */
void
fs_visitor::setup_paramvalues_refs()
{
   /* Set up the pointers to ParamValues now that the array is finalized. */
   for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
      c->prog_data.param[i] =
         fp->Base.Parameters->ParameterValues[this->param_index[i]] +
         this->param_offset[i];
   }
}
void
fs_visitor::assign_curb_setup()
{
   c->prog_data.first_curbe_grf = c->nr_payload_regs;
   c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == UNIFORM) {
            int constant_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
            struct brw_reg brw_reg = brw_vec1_grf(c->prog_data.first_curbe_grf +
                                                  constant_nr / 8,
                                                  constant_nr % 8);

            inst->src[i].file = FIXED_HW_REG;
            inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
         }
      }
   }
}
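/* For example: with nr_params == 20, curb_read_length is ALIGN(20, 8) / 8
 * == 3 registers of push constants.  A UNIFORM access at constant_nr == 11
 * with first_curbe_grf == 2 then becomes brw_vec1_grf(2 + 11 / 8, 11 % 8),
 * i.e. channel 3 of g3, since each GRF holds eight float components.
 */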
void
fs_visitor::calculate_urb_setup()
{
   for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
      urb_setup[i] = -1;
   }

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (intel->gen >= 6) {
      for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
         if (brw->fragment_program->Base.InputsRead & BITFIELD64_BIT(i)) {
            urb_setup[i] = urb_next++;
         }
      }
   } else {
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
         if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
            int fp_index = -1;

            if (i >= VERT_RESULT_VAR0)
               fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
            else if (i <= VERT_RESULT_TEX7)
               fp_index = i;

            if (fp_index >= 0)
               urb_setup[fp_index] = urb_next++;
         }
      }
   }

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   c->prog_data.urb_read_length = urb_next * 2;
}
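/* For example: a fragment shader reading two varyings gets urb_next == 2
 * and therefore urb_read_length == 4 GRFs -- four setup channels per
 * attribute at half a register each.
 */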
void
fs_visitor::assign_urb_setup()
{
   int urb_start = c->prog_data.first_curbe_grf + c->prog_data.curb_read_length;

   /* Offset all the urb_setup[] indices by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[2].file == FIXED_HW_REG);
         inst->src[2].fixed_hw_reg.nr += urb_start;
      }

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == FIXED_HW_REG);
         inst->src[0].fixed_hw_reg.nr += urb_start;
      }
   }

   this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}
/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This is mostly duplicated with what brw_fs_vector_splitting does,
 * but that's really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization.  We can do it once here, safely.  This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs.  If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
void
fs_visitor::split_virtual_grfs()
{
   int num_vars = this->virtual_grf_next;
   bool split_grf[num_vars];
   int new_virtual_grf[num_vars];

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      if (this->virtual_grf_sizes[i] != 1)
         split_grf[i] = true;
      else
         split_grf[i] = false;
   }

   if (brw->has_pln) {
      /* PLN opcodes rely on the delta_xy being contiguous. */
      split_grf[this->delta_x.reg] = false;
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      /* Texturing produces 4 contiguous registers, so no splitting. */
      if (inst->is_tex()) {
         split_grf[inst->dst.reg] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (split_grf[i]) {
         new_virtual_grf[i] = virtual_grf_alloc(1);
         for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
            int reg = virtual_grf_alloc(1);
            assert(reg == new_virtual_grf[i] + j - 1);
         }
         this->virtual_grf_sizes[i] = 1;
      }
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF &&
          split_grf[inst->dst.reg] &&
          inst->dst.reg_offset != 0) {
         inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
                          inst->dst.reg_offset - 1);
         inst->dst.reg_offset = 0;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             split_grf[inst->src[i].reg] &&
             inst->src[i].reg_offset != 0) {
            inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
                                inst->src[i].reg_offset - 1);
            inst->src[i].reg_offset = 0;
         }
      }
   }
   this->live_intervals_valid = false;
}
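/* The effect, sketched: a size-3 virtual GRF vgrf5 accessed at reg_offsets
 * 0, 1 and 2 becomes three size-1 registers.  Offset 0 keeps the old
 * number, while offsets 1 and 2 are renamed to new_virtual_grf[5] and
 * new_virtual_grf[5] + 1, so each component gets its own live interval for
 * allocation, coalescing and dead code elimination.
 */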
/**
 * Choose accesses from the UNIFORM file to demote to using the pull
 * constant buffer.
 *
 * We allow a fragment shader to have more than the specified minimum
 * maximum number of fragment shader uniform components (64).  If
 * there are too many of these, they'd fill up all of register space.
 * So, this will push some of them out to the pull constant buffer and
 * update the program to load them.
 */
void
fs_visitor::setup_pull_constants()
{
   /* Only allow 16 registers (128 uniform components) as push constants. */
   unsigned int max_uniform_components = 16 * 8;
   if (c->prog_data.nr_params <= max_uniform_components)
      return;

   /* Just demote the end of the list.  We could probably do better
    * here, demoting things that are rarely used in the program first.
    */
   int pull_uniform_base = max_uniform_components;
   int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
         if (uniform_nr < pull_uniform_base)
            continue;

         fs_reg dst = fs_reg(this, glsl_type::float_type);
         fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
                                              dst);
         pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
         pull->ir = inst->ir;
         pull->annotation = inst->annotation;
         pull->base_mrf = 14;

         inst->insert_before(pull);

         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;
         inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
      }
   }

   for (int i = 0; i < pull_uniform_count; i++) {
      c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
      c->prog_data.pull_param_convert[i] =
         c->prog_data.param_convert[pull_uniform_base + i];
   }
   c->prog_data.nr_params -= pull_uniform_count;
   c->prog_data.nr_pull_params = pull_uniform_count;
}
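/* For example: with 140 uniform components, the last 12 get demoted.  A use
 * of uniform 130 becomes a FS_OPCODE_PULL_CONSTANT_LOAD with offset
 * ((130 - 128) * 4) & ~15 == 0 -- the 16-byte-aligned block containing it --
 * and smear (130 - 128) & 3 == 2 to pick the right component out of the
 * loaded vec4.
 */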
void
fs_visitor::calculate_live_intervals()
{
   int num_vars = this->virtual_grf_next;
   int *def = ralloc_array(mem_ctx, int, num_vars);
   int *use = ralloc_array(mem_ctx, int, num_vars);
   int loop_depth = 0;
   int loop_start = 0;
   int bb_header_ip = 0;

   if (this->live_intervals_valid)
      return;

   for (int i = 0; i < num_vars; i++) {
      def[i] = MAX_INSTRUCTION;
      use[i] = -1;
   }

   int ip = 0;
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode == BRW_OPCODE_DO) {
         if (loop_depth++ == 0)
            loop_start = ip;
      } else if (inst->opcode == BRW_OPCODE_WHILE) {
         loop_depth--;

         if (loop_depth == 0) {
            /* Patches up the use of vars marked for being live across
             * the whole loop.
             */
            for (int i = 0; i < num_vars; i++) {
               if (use[i] == loop_start) {
                  use[i] = ip;
               }
            }
         }
      } else {
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
               int reg = inst->src[i].reg;

               if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
                                   def[reg] >= bb_header_ip)) {
                  use[reg] = ip;
               } else {
                  def[reg] = MIN2(loop_start, def[reg]);
                  use[reg] = loop_start;

                  /* Nobody else is going to go smash our start to
                   * later in the loop now, because def[reg] now
                   * points before the bb header.
                   */
               }
            }
         }
         if (inst->dst.file == GRF && inst->dst.reg != 0) {
            int reg = inst->dst.reg;

            if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
                                !inst->predicated)) {
               def[reg] = MIN2(def[reg], ip);
            } else {
               def[reg] = MIN2(def[reg], loop_start);
            }
         }
      }

      ip++;

      /* Set the basic block header IP.  This is used for determining
       * if a complete def of a single-register virtual GRF in a loop
       * dominates a use in the same basic block.  It's a quick way to
       * reduce the live interval range of most registers used in a
       * loop.
       */
      if (inst->opcode == BRW_OPCODE_IF ||
          inst->opcode == BRW_OPCODE_ELSE ||
          inst->opcode == BRW_OPCODE_ENDIF ||
          inst->opcode == BRW_OPCODE_DO ||
          inst->opcode == BRW_OPCODE_WHILE ||
          inst->opcode == BRW_OPCODE_BREAK ||
          inst->opcode == BRW_OPCODE_CONTINUE) {
         bb_header_ip = ip;
      }
   }

   ralloc_free(this->virtual_grf_def);
   ralloc_free(this->virtual_grf_use);
   this->virtual_grf_def = def;
   this->virtual_grf_use = use;

   this->live_intervals_valid = true;
}
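/* The loop handling above is what keeps this correct without real dataflow
 * analysis: a register defined before a loop and read inside it has its use
 * extended to the WHILE instruction's ip, so it stays live across the back
 * edge instead of looking dead on the second trip through the loop body.
 */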
/**
 * Attempts to move immediate constants into the immediate
 * constant slot of following instructions.
 *
 * Immediate constants are a bit tricky -- they have to be in the last
 * operand slot, and you can't do abs/negate on them.
 */
bool
fs_visitor::propagate_constants()
{
   bool progress = false;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != GRF || inst->src[0].file != IMM ||
          inst->dst.type != inst->src[0].type)
         continue;

      /* Don't bother with cases where we should have had the
       * operation on the constant folded in GLSL already.
       */
      if (inst->saturate)
         continue;

      /* Found a move of a constant to a GRF.  Find anything else using the GRF
       * before it's written, and replace it with the constant if we can.
       */
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         for (int i = 2; i >= 0; i--) {
            if (scan_inst->src[i].file != GRF ||
                scan_inst->src[i].reg != inst->dst.reg ||
                scan_inst->src[i].reg_offset != inst->dst.reg_offset)
               continue;

            /* Don't bother with cases where we should have had the
             * operation on the constant folded in GLSL already.
             */
            if (scan_inst->src[i].negate || scan_inst->src[i].abs)
               continue;

            switch (scan_inst->opcode) {
            case BRW_OPCODE_MOV:
               scan_inst->src[i] = inst->src[0];
               progress = true;
               break;

            case BRW_OPCODE_MUL:
            case BRW_OPCODE_ADD:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               } else if (i == 0 && scan_inst->src[1].file != IMM) {
                  /* Fit this constant in by commuting the operands. */
                  scan_inst->src[0] = scan_inst->src[1];
                  scan_inst->src[1] = inst->src[0];
                  progress = true;
               }
               break;
            case BRW_OPCODE_CMP:
            case BRW_OPCODE_SEL:
               if (i == 1) {
                  scan_inst->src[i] = inst->src[0];
                  progress = true;
               }
               break;
            default:
               break;
            }
         }

         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->dst.reg &&
             (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
              scan_inst->is_tex())) {
            break;
         }
      }
   }

   if (progress)
      this->live_intervals_valid = false;

   return progress;
}
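/* For example: given "MOV vgrf3, 2.0f" followed by "ADD vgrf4, vgrf3,
 * vgrf5", the constant lands in operand 0, so the operands are commuted to
 * "ADD vgrf4, vgrf5, 2.0f" -- the hardware only accepts an immediate in the
 * last source slot, which is also why CMP and SEL only take the constant
 * when i == 1.
 */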
/**
 * Must be called after calculate_live_intervals() to remove unused
 * writes to registers -- register allocation will fail otherwise
 * because something deffed but not used won't be considered to
 * interfere with other regs.
 */
bool
fs_visitor::dead_code_eliminate()
{
   bool progress = false;
   int pc = 0;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
         inst->remove();
         progress = true;
      }

      pc++;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
bool
fs_visitor::register_coalesce()
{
   bool progress = false;
   int if_depth = 0;
   int loop_depth = 0;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      /* Make sure that we dominate the instructions we're going to
       * scan for interfering with our coalescing, or we won't have
       * scanned enough to see if anything interferes with our
       * coalescing.  We don't dominate the following instructions if
       * we're in a loop or an if block.
       */
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      case BRW_OPCODE_IF:
         if_depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if_depth--;
         break;
      default:
         break;
      }
      if (loop_depth || if_depth)
         continue;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->saturate ||
          inst->dst.file != GRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type)
         continue;

      bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;

      /* Found a move of a GRF to a GRF.  Let's see if we can coalesce
       * them: check for no writes to either one until the exit of the
       * program.
       */
      bool interfered = false;
      exec_list_iterator scan_iter = iter;
      scan_iter.next();
      for (; scan_iter.has_next(); scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         if (scan_inst->dst.file == GRF) {
            if (scan_inst->dst.reg == inst->dst.reg &&
                (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
            if (scan_inst->dst.reg == inst->src[0].reg &&
                (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
                 scan_inst->is_tex())) {
               interfered = true;
               break;
            }
         }

         /* The gen6 MATH instruction can't handle source modifiers, so avoid
          * coalescing those for now.  We should do something more specific.
          */
         if (intel->gen == 6 && scan_inst->is_math() && has_source_modifiers) {
            interfered = true;
            break;
         }
      }
      if (interfered)
         continue;

      /* Rewrite the later usage to point at the source of the move to
       * be removed.
       */
      for (exec_list_iterator scan_iter = iter; scan_iter.has_next();
           scan_iter.next()) {
         fs_inst *scan_inst = (fs_inst *)scan_iter.get();

         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->dst.reg &&
                scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
               scan_inst->src[i].reg = inst->src[0].reg;
               scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
               scan_inst->src[i].abs |= inst->src[0].abs;
               scan_inst->src[i].negate ^= inst->src[0].negate;
               scan_inst->src[i].smear = inst->src[0].smear;
            }
         }
      }

      inst->remove();
      progress = true;
   }

   if (progress)
      live_intervals_valid = false;

   return progress;
}
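/* For example: "MOV vgrf4, -vgrf2" followed only by reads of vgrf4 lets us
 * delete the MOV and rewrite the readers to use vgrf2 directly, folding the
 * source modifiers into each use (note the "negate ^=" above: a reader that
 * already negated vgrf4 ends up reading vgrf2 unnegated).
 */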
bool
fs_visitor::compute_to_mrf()
{
   bool progress = false;
   int next_ip = 0;

   calculate_live_intervals();

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          inst->predicated ||
          inst->dst.file != MRF || inst->src[0].file != GRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
         continue;

      /* Can't compute-to-MRF this GRF if someone else was going to
       * read it later.
       */
      if (this->virtual_grf_use[inst->src[0].reg] > ip)
         continue;

      /* Found a move of a GRF to a MRF.  Let's see if we can go
       * rewrite the thing that made this GRF to write into the MRF.
       */
      fs_inst *scan_inst;
      for (scan_inst = (fs_inst *)inst->prev;
           scan_inst->prev != NULL;
           scan_inst = (fs_inst *)scan_inst->prev) {
         if (scan_inst->dst.file == GRF &&
             scan_inst->dst.reg == inst->src[0].reg) {
            /* Found the last thing to write our reg we want to turn
             * into a compute-to-MRF.
             */

            if (scan_inst->is_tex()) {
               /* texturing writes several contiguous regs, so we can't
                * compute-to-mrf that.
                */
               break;
            }

            /* If it's predicated, it (probably) didn't populate all
             * the channels.
             */
            if (scan_inst->predicated)
               break;

            /* SEND instructions can't have MRF as a destination. */
            if (scan_inst->mlen)
               break;

            if (intel->gen >= 6) {
               /* gen6 math instructions must have the destination be
                * GRF, so no compute-to-MRF for them.
                */
               if (scan_inst->is_math()) {
                  break;
               }
            }

            if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
               /* Found the creator of our MRF's source value. */
               scan_inst->dst.file = MRF;
               scan_inst->dst.hw_reg = inst->dst.hw_reg;
               scan_inst->saturate |= inst->saturate;
               inst->remove();
               progress = true;
            }
            break;
         }

         /* We don't handle flow control here.  Most computation of
          * values that end up in MRFs is shortly before the MRF
          * write anyway.
          */
         if (scan_inst->opcode == BRW_OPCODE_DO ||
             scan_inst->opcode == BRW_OPCODE_WHILE ||
             scan_inst->opcode == BRW_OPCODE_ELSE ||
             scan_inst->opcode == BRW_OPCODE_ENDIF) {
            break;
         }

         /* You can't read from an MRF, so if someone else reads our
          * MRF's source GRF that we wanted to rewrite, that stops us.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (scan_inst->src[i].file == GRF &&
                scan_inst->src[i].reg == inst->src[0].reg &&
                scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
               interfered = true;
            }
         }
         if (interfered)
            break;

         if (scan_inst->dst.file == MRF &&
             scan_inst->dst.hw_reg == inst->dst.hw_reg) {
            /* Somebody else wrote our MRF here, so we can't
             * compute-to-MRF before that.
             */
            break;
         }

         if (scan_inst->mlen > 0) {
            /* Found a SEND instruction, which means that there are
             * live values in MRFs from base_mrf to base_mrf +
             * scan_inst->mlen - 1.  Don't go pushing our MRF write up
             * above it.
             */
            if (inst->dst.hw_reg >= scan_inst->base_mrf &&
                inst->dst.hw_reg < scan_inst->base_mrf + scan_inst->mlen) {
               break;
            }
         }
      }
   }

   return progress;
}
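/* For example: "MUL vgrf6, vgrf1, vgrf2" followed by "MOV m4, vgrf6", with
 * no later reads of vgrf6, collapses to a single "MUL m4, vgrf1, vgrf2".
 * The backwards scan above just has to prove that nothing in between read
 * vgrf6, wrote m4, or was a SEND still relying on m4's MRF range.
 */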
/**
 * Walks through basic blocks, looking for repeated MRF writes and
 * removing the later ones.
 */
bool
fs_visitor::remove_duplicate_mrf_writes()
{
   fs_inst *last_mrf_move[16];
   bool progress = false;

   memset(last_mrf_move, 0, sizeof(last_mrf_move));

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
      case BRW_OPCODE_WHILE:
      case BRW_OPCODE_IF:
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_ENDIF:
         memset(last_mrf_move, 0, sizeof(last_mrf_move));
         break;
      default:
         break;
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF) {
         fs_inst *prev_inst = last_mrf_move[inst->dst.hw_reg];
         if (prev_inst && inst->equals(prev_inst)) {
            inst->remove();
            progress = true;
            continue;
         }
      }

      /* Clear out the last-write records for MRFs that were overwritten. */
      if (inst->dst.file == MRF) {
         last_mrf_move[inst->dst.hw_reg] = NULL;
      }

      if (inst->mlen > 0) {
         /* Found a SEND instruction, which will include two or fewer
          * implied MRF writes.  We could do better here.
          */
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            last_mrf_move[inst->base_mrf + i] = NULL;
         }
      }

      /* Clear out any MRF move records whose sources got overwritten. */
      if (inst->dst.file == GRF) {
         for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
            if (last_mrf_move[i] &&
                last_mrf_move[i]->src[0].reg == inst->dst.reg) {
               last_mrf_move[i] = NULL;
            }
         }
      }

      if (inst->opcode == BRW_OPCODE_MOV &&
          inst->dst.file == MRF &&
          inst->src[0].file == GRF &&
          !inst->predicated) {
         last_mrf_move[inst->dst.hw_reg] = inst;
      }
   }

   return progress;
}
bool
fs_visitor::virtual_grf_interferes(int a, int b)
{
   int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
   int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);

   /* We can't handle dead register writes here, without iterating
    * over the whole instruction stream to find every single dead
    * write to that register to compare to the live interval of the
    * other register.  Just assert that dead_code_eliminate() has been
    * called.
    */
   assert((this->virtual_grf_use[a] != -1 ||
           this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
          (this->virtual_grf_use[b] != -1 ||
           this->virtual_grf_def[b] == MAX_INSTRUCTION));

   return start < end;
}
static struct brw_reg
brw_reg_from_fs_reg(fs_reg *reg)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case GRF:
   case ARF:
   case MRF:
      if (reg->smear == -1) {
         brw_reg = brw_vec8_reg(reg->file,
                                reg->hw_reg, 0);
      } else {
         brw_reg = brw_vec1_reg(reg->file,
                                reg->hw_reg, reg->smear);
      }
      brw_reg = retype(brw_reg, reg->type);
      break;
   case IMM:
      switch (reg->type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(reg->imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(reg->imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(reg->imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;
   case FIXED_HW_REG:
      brw_reg = reg->fixed_hw_reg;
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case UNIFORM:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   if (reg->abs)
      brw_reg = brw_abs(brw_reg);
   if (reg->negate)
      brw_reg = negate(brw_reg);

   return brw_reg;
}
void
fs_visitor::generate_code()
{
   int last_native_inst = 0;
   const char *last_annotation_string = NULL;
   ir_instruction *last_annotation_ir = NULL;

   int if_stack_array_size = 16;
   int loop_stack_array_size = 16;
   int if_stack_depth = 0, loop_stack_depth = 0;
   brw_instruction **if_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, if_stack_array_size);
   brw_instruction **loop_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
   int *if_depth_in_loop =
      rzalloc_array(this->mem_ctx, int, loop_stack_array_size);

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("Native code for fragment shader %d:\n",
             ctx->Shader.CurrentFragmentProgram->Name);
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();
      struct brw_reg src[3], dst;

      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf("   ");
               last_annotation_ir->print();
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf("   %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = brw_reg_from_fs_reg(&inst->src[i]);
      }
      dst = brw_reg_from_fs_reg(&inst->dst);

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicated);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            assert(intel->gen >= 6);
            if_stack[if_stack_depth] =
               brw_IF_gen6(p, inst->conditional_mod, src[0], src[1]);
         } else {
            if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
         }
         if_depth_in_loop[loop_stack_depth]++;
         if_stack_depth++;
         if (if_stack_array_size <= if_stack_depth) {
            if_stack_array_size *= 2;
            if_stack = reralloc(this->mem_ctx, if_stack, brw_instruction *,
                                if_stack_array_size);
         }
         break;

      case BRW_OPCODE_ELSE:
         if_stack[if_stack_depth - 1] =
            brw_ELSE(p, if_stack[if_stack_depth - 1]);
         break;
      case BRW_OPCODE_ENDIF:
         if_stack_depth--;
         brw_ENDIF(p, if_stack[if_stack_depth]);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if (loop_stack_array_size <= loop_stack_depth) {
            loop_stack_array_size *= 2;
            loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
                                  loop_stack_array_size);
            if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
                                        loop_stack_array_size);
         }
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            brw_CONT_gen6(p, loop_stack[loop_stack_depth - 1]);
         else
            brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
         break;

      case FS_OPCODE_RCP:
      case FS_OPCODE_RSQ:
      case FS_OPCODE_SQRT:
      case FS_OPCODE_EXP2:
      case FS_OPCODE_LOG2:
      case FS_OPCODE_POW:
      case FS_OPCODE_SIN:
      case FS_OPCODE_COS:
         generate_math(inst, dst, src);
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case FS_OPCODE_TXD:
      case FS_OPCODE_TXL:
         generate_tex(inst, dst, src[0]);
         break;
      case FS_OPCODE_DISCARD_NOT:
         generate_discard_not(inst, dst);
         break;
      case FS_OPCODE_DISCARD_AND:
         generate_discard_and(inst, src[0]);
         break;
      case FS_OPCODE_DDX:
         generate_ddx(inst, dst, src[0]);
         break;
      case FS_OPCODE_DDY:
         generate_ddy(inst, dst, src[0]);
         break;

      case FS_OPCODE_SPILL:
         generate_spill(inst, src[0]);
         break;

      case FS_OPCODE_UNSPILL:
         generate_unspill(inst, dst);
         break;

      case FS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(inst, dst);
         break;

      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst);
         break;
      default:
         if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
            _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
                          brw_opcodes[inst->opcode].name);
         } else {
            _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
         }
         this->fail = true;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            if (0) {
               printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                      ((uint32_t *)&p->store[i])[3],
                      ((uint32_t *)&p->store[i])[2],
                      ((uint32_t *)&p->store[i])[1],
                      ((uint32_t *)&p->store[i])[0]);
            }
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   ralloc_free(if_stack);
   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);

   /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (0) {
      if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
         for (unsigned int i = 0; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }
   }
}
GLboolean
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;

   if (!prog)
      return GL_FALSE;

   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   if (!shader)
      return GL_FALSE;

   /* We always use 8-wide mode, at least for now.  For one, flow
    * control only works in 8-wide.  Also, when we're fragment shader
    * bound, we're almost always under register pressure as well, so
    * 8-wide would save us from the performance cliff of spilling
    * regs.
    */
   c->dispatch_width = 8;

   if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
      printf("GLSL IR for native fragment shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n");
   }

   /* Now the main event: Visit the shader IR and generate our FS IR for it.
    */
   fs_visitor v(c, shader);

   v.calculate_urb_setup();
   if (intel->gen < 6)
      v.emit_interpolation_setup_gen4();
   else
      v.emit_interpolation_setup_gen6();

   /* Generate FS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   foreach_iter(exec_list_iterator, iter, *shader->ir) {
      ir_instruction *ir = (ir_instruction *)iter.get();
      v.base_ir = ir;
      ir->accept(&v);
   }

   v.emit_fb_writes();

   v.split_virtual_grfs();

   v.setup_paramvalues_refs();
   v.setup_pull_constants();

   bool progress;
   do {
      progress = false;

      progress = v.remove_duplicate_mrf_writes() || progress;

      progress = v.propagate_constants() || progress;
      progress = v.register_coalesce() || progress;
      progress = v.compute_to_mrf() || progress;
      progress = v.dead_code_eliminate() || progress;
   } while (progress);

   v.schedule_instructions();

   v.assign_curb_setup();
   v.assign_urb_setup();

   if (0) {
      /* Debug of register spilling: Go spill everything. */
      int virtual_grf_count = v.virtual_grf_next;
      for (int i = 1; i < virtual_grf_count; i++) {
         v.spill_reg(i);
      }
      v.calculate_live_intervals();
   }

   if (0)
      v.assign_regs_trivial();
   else {
      while (!v.assign_regs()) {
         if (v.fail)
            break;
      }
   }

   if (!v.fail)
      v.generate_code();

   assert(!v.fail); /* FINISHME: Cleanly fail, tested at link time, etc. */

   if (v.fail)
      return GL_FALSE;

   c->prog_data.total_grf = v.grf_used;