/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/ir.h"
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "program/prog_to_nir.h"
#include "brw_fs.h"
#include "brw_nir.h"

static void
nir_optimize(nir_shader *nir)
{
   bool progress;
   do {
      progress = false;

      nir_lower_vars_to_ssa(nir);
      nir_validate_shader(nir);
      nir_lower_alu_to_scalar(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      nir_lower_phis_to_scalar(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_algebraic(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_constant_folding(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_remove_phis(nir);
      nir_validate_shader(nir);
   } while (progress);
}

static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
   int *count = (int *) state;
   nir_foreach_instr(block, instr) {
      (*count)++;
   }
   return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
   int count = 0;
   nir_foreach_overload(nir, overload) {
      if (!overload->impl)
         continue;
      nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
   }
   return count;
}

void
fs_visitor::emit_nir_code()
{
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader) {
      lower_output_reads(shader->base.ir);
      nir = glsl_to_nir(&shader->base, options);
   } else {
      nir = prog_to_nir(prog, options);
      nir_convert_to_ssa(nir); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_lower_tex_projector(nir);
   nir_validate_shader(nir);

   nir_normalize_cubemap_coords(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

   nir_optimize(nir);

   /* Lower a bunch of stuff */
   nir_lower_var_copies(nir);
   nir_validate_shader(nir);

   /* Get rid of split copies */
   nir_optimize(nir);

   if (shader_prog) {
      nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
                                                   &num_direct_uniforms,
                                                   &nir->num_uniforms);
   } else {
      /* ARB programs generally create a giant array of "uniform" data, and allow
       * indirect addressing without any boundaries.  In the absence of bounds
       * analysis, it's all or nothing.  num_direct_uniforms is only useful when
       * we have some direct and some indirect access; it doesn't matter here.
       */
      num_direct_uniforms = 0;
   }
   nir_assign_var_locations_scalar(&nir->inputs, &nir->num_inputs);
   nir_assign_var_locations_scalar(&nir->outputs, &nir->num_outputs);

   nir_lower_io(nir);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   nir_lower_samplers(nir, shader_prog, stage);
   nir_validate_shader(nir);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);

   nir_optimize(nir);

   if (brw->gen >= 6) {
      /* Try and fuse multiply-adds */
      nir_opt_peephole_ffma(nir);
      nir_validate_shader(nir);
   }

   nir_opt_algebraic_late(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_lower_to_source_mods(nir);
   nir_validate_shader(nir);
   nir_copy_prop(nir);
   nir_validate_shader(nir);
   nir_opt_dce(nir);
   nir_validate_shader(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (SSA form) for %s shader:\n", stage_name);
      nir_print_shader(nir, stderr);
   }

   if (dispatch_width == 8) {
      static GLuint msg_id = 0;
      _mesa_gl_debug(&brw->ctx, &msg_id,
                     MESA_DEBUG_SOURCE_SHADER_COMPILER,
                     MESA_DEBUG_TYPE_OTHER,
                     MESA_DEBUG_SEVERITY_NOTIFICATION,
                     "%s NIR shader: %d inst\n",
                     stage_abbrev,
                     count_nir_instrs(nir));
   }

   nir_convert_from_ssa(nir);
   nir_validate_shader(nir);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (brw->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */

   if (nir->num_inputs > 0) {
      nir_inputs = vgrf(nir->num_inputs);
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = vgrf(nir->num_outputs);
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_setup_uniforms(nir);
   }

   nir_emit_system_values(nir);

   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = vgrf(size);
   }

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n", stage_name);
      nir_print_shader(nir, stderr);
   }
}

void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      enum brw_reg_type type = brw_type_for_base_type(var->type);
      fs_reg input = offset(nir_inputs, var->data.driver_location);

      fs_reg reg;
      switch (stage) {
      case MESA_SHADER_VERTEX: {
         /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
          * stored in nir_variable::location.
          *
          * However, NIR's load_input intrinsics use a different index - an
          * offset into a single contiguous array containing all inputs.
          * This index corresponds to the nir_variable::driver_location field.
          *
          * So, we need to copy from fs_reg(ATTR, var->location) to
          * offset(nir_inputs, var->data.driver_location).
          */
         unsigned components = var->type->without_array()->components();
         unsigned array_length = var->type->is_array() ? var->type->length : 1;
         for (unsigned i = 0; i < array_length; i++) {
            for (unsigned j = 0; j < components; j++) {
               emit(MOV(retype(offset(input, components * i + j), type),
                        offset(fs_reg(ATTR, var->data.location + i, type), j)));
            }
         }
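         /* For example (illustrative): an input declared "in vec4 a[2]" has
          * components == 4 and array_length == 2, so the loops above emit
          * eight MOVs, copying ATTR slots location and location + 1
          * component-by-component into eight consecutive elements of
          * nir_inputs starting at driver_location.
          */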
         break;
      }
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_COMPUTE:
         unreachable("fs_visitor not used for these stages yet.");
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.location == VARYING_SLOT_POS) {
            reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                var->data.origin_upper_left);
            emit_percomp(MOV(input, reg), 0xF);
         } else {
            emit_general_interpolation(input, var->name, var->type,
                                       (glsl_interp_qualifier) var->data.interpolation,
                                       var->data.location, var->data.centroid,
                                       var->data.sample);
         }
         break;
      }
   }
}

void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      fs_reg reg = offset(nir_outputs, var->data.driver_location);

      int vector_elements =
         var->type->is_array() ? var->type->fields.array->vector_elements
                               : var->type->vector_elements;

      if (stage == MESA_SHADER_VERTEX) {
         for (int i = 0; i < ALIGN(type_size(var->type), 4) / 4; i++) {
            int output = var->data.location + i;
            this->outputs[output] = offset(reg, 4 * i);
            this->output_components[output] = vector_elements;
         }
      } else if (var->data.index > 0) {
         assert(var->data.location == FRAG_RESULT_DATA0);
         assert(var->data.index == 1);
         this->dual_src_output = reg;
         this->do_dual_src = true;
      } else if (var->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = reg;
            this->output_components[i] = 4;
         }
      } else if (var->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = reg;
      } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(var->data.location >= FRAG_RESULT_DATA0 &&
                var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
            int output = var->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = offset(reg, vector_elements * i);
            this->output_components[output] = vector_elements;
         }
      }
   }
}

void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;

   /* We split the uniform register file in half.  The first half is
    * entirely direct uniforms.  The second half is indirect.
    */
   param_size[0] = num_direct_uniforms;
   if (shader->num_uniforms > num_direct_uniforms)
      param_size[num_direct_uniforms] = shader->num_uniforms - num_direct_uniforms;
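
   /* For example (illustrative): with shader->num_uniforms == 16 where only
    * the first 10 slots are ever accessed directly, param_size[0] == 10 and
    * param_size[10] == 6, describing one directly-addressed region followed
    * by one indirectly-addressed region.
    */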

   if (dispatch_width != 8)
      return;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBO's and atomics don't take up space in the uniform file */
         if (var->interface_type != NULL || var->type->contains_atomic())
            continue;

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);
      }
   } else {
      /* prog_to_nir doesn't create uniform variables; set param up directly. */
      for (unsigned p = 0; p < prog->Parameters->NumParameters; p++) {
         for (unsigned int i = 0; i < 4; i++) {
            stage_prog_data->param[4 * p + i] =
               &prog->Parameters->ParameterValues[p][i];
         }
      }
   }
}

void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
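   /* For example (illustrative): a uniform named "foo" matches storage
    * entries "foo", "foo.bar", and "foo[0]", but not "foobar", because the
    * character right after the matched prefix must be '\0', '.', or '['.
    */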
   for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}

void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}

static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->brw->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      default:
         break;
      }
   }

   return true;
}

void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}

void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = vgrf(size);
   }

   nir_emit_cf_list(&impl->body);
}

void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = emit(MOV(reg_null_d,
                            retype(get_nir_src(if_stmt->condition),
                                   BRW_REGISTER_TYPE_D)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);

   if (!try_replace_with_sel() && brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }
}

void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      /* We can hit these, but we do nothing now and use them as
       * immediates later.
       */
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}

static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_bool:
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (instr->src[0].src.is_ssa ||
       !instr->src[0].src.reg.reg ||
       !instr->src[0].src.reg.reg->parent_instr)
      return false;

   if (instr->src[0].src.reg.reg->parent_instr->type !=
       nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.reg.reg->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (brw->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
      if (value1->f[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;
      tmp.stride = 2;

      fs_inst *or_inst = emit(OR(tmp, g0, fs_reg(0x3f80)));
      or_inst->src[1].type = BRW_REGISTER_TYPE_UW;

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
      tmp.stride = 1;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
      if (value1->f[0] == -1.0f) {
         g1_6.negate = true;
      }

      emit(OR(tmp, g1_6, fs_reg(0x3f800000)));
   }
   emit(AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000)));
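   /* Worked example (illustrative): front facing leaves bit 15 of g0.0
    * clear, so the high word of each tmp dword is 0x3f80 and the AND yields
    * 0x3f800000 = 1.0f.  Back facing sets bit 15, the high word becomes
    * 0xbf80, and the AND yields 0xbf800000 = -1.0f.  The 0xbf800000 mask
    * also clears the uninitialized low word and any stray primitive-type
    * bits that the OR picked up from g0.0.
    */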

   return true;
}

void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = retype(vgrf(4), result.type);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = emit(MOV(offset(temp, i),
                            offset(op[0], instr->src[0].swizzle[i])));
         } else {
            inst = emit(MOV(offset(temp, i),
                            offset(op[i], instr->src[i].swizzle[0])));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            emit(MOV(offset(result, i), offset(temp, i)));
         }
      }
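      /* Example of the hazard (illustrative): for r0 = vec4(r0.y, r0.x,
       * r0.z, r0.w), writing component 0 directly into r0 would clobber
       * r0.x before it is read as the source of component 1; staging the
       * result in temp avoids reading already-overwritten components.
       */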
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
= emit(MOV(result
, op
[0]));
837 inst
->saturate
= instr
->dest
.saturate
;
842 emit(MOV(result
, op
[0]));
846 /* AND(val, 0x80000000) gives the sign bit.
848 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
851 emit(CMP(reg_null_f
, op
[0], fs_reg(0.0f
), BRW_CONDITIONAL_NZ
));
853 fs_reg result_int
= retype(result
, BRW_REGISTER_TYPE_UD
);
854 op
[0].type
= BRW_REGISTER_TYPE_UD
;
855 result
.type
= BRW_REGISTER_TYPE_UD
;
856 emit(AND(result_int
, op
[0], fs_reg(0x80000000u
)));
858 inst
= emit(OR(result_int
, result_int
, fs_reg(0x3f800000u
)));
859 inst
->predicate
= BRW_PREDICATE_NORMAL
;
860 if (instr
->dest
.saturate
) {
861 inst
= emit(MOV(result
, result
));
862 inst
->saturate
= true;
868 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
869 * -> non-negative val generates 0x00000000.
870 * Predicated OR sets 1 if val is positive.
872 emit(CMP(reg_null_d
, op
[0], fs_reg(0), BRW_CONDITIONAL_G
));
873 emit(ASR(result
, op
[0], fs_reg(31)));
874 inst
= emit(OR(result
, result
, fs_reg(1)));
875 inst
->predicate
= BRW_PREDICATE_NORMAL
;
879 inst
= emit_math(SHADER_OPCODE_RCP
, result
, op
[0]);
880 inst
->saturate
= instr
->dest
.saturate
;
884 inst
= emit_math(SHADER_OPCODE_EXP2
, result
, op
[0]);
885 inst
->saturate
= instr
->dest
.saturate
;
889 inst
= emit_math(SHADER_OPCODE_LOG2
, result
, op
[0]);
890 inst
->saturate
= instr
->dest
.saturate
;
895 unreachable("not reached: should be handled by ir_explog_to_explog2");
898 inst
= emit_math(SHADER_OPCODE_SIN
, result
, op
[0]);
899 inst
->saturate
= instr
->dest
.saturate
;
903 inst
= emit_math(SHADER_OPCODE_COS
, result
, op
[0]);
904 inst
->saturate
= instr
->dest
.saturate
;
908 if (fs_key
->high_quality_derivatives
) {
909 inst
= emit(FS_OPCODE_DDX_FINE
, result
, op
[0]);
911 inst
= emit(FS_OPCODE_DDX_COARSE
, result
, op
[0]);
913 inst
->saturate
= instr
->dest
.saturate
;
915 case nir_op_fddx_fine
:
916 inst
= emit(FS_OPCODE_DDX_FINE
, result
, op
[0]);
917 inst
->saturate
= instr
->dest
.saturate
;
919 case nir_op_fddx_coarse
:
920 inst
= emit(FS_OPCODE_DDX_COARSE
, result
, op
[0]);
921 inst
->saturate
= instr
->dest
.saturate
;
924 if (fs_key
->high_quality_derivatives
) {
925 inst
= emit(FS_OPCODE_DDY_FINE
, result
, op
[0],
926 fs_reg(fs_key
->render_to_fbo
));
928 inst
= emit(FS_OPCODE_DDY_COARSE
, result
, op
[0],
929 fs_reg(fs_key
->render_to_fbo
));
931 inst
->saturate
= instr
->dest
.saturate
;
933 case nir_op_fddy_fine
:
934 inst
= emit(FS_OPCODE_DDY_FINE
, result
, op
[0],
935 fs_reg(fs_key
->render_to_fbo
));
936 inst
->saturate
= instr
->dest
.saturate
;
938 case nir_op_fddy_coarse
:
939 inst
= emit(FS_OPCODE_DDY_COARSE
, result
, op
[0],
940 fs_reg(fs_key
->render_to_fbo
));
941 inst
->saturate
= instr
->dest
.saturate
;
946 inst
= emit(ADD(result
, op
[0], op
[1]));
947 inst
->saturate
= instr
->dest
.saturate
;
951 inst
= emit(MUL(result
, op
[0], op
[1]));
952 inst
->saturate
= instr
->dest
.saturate
;
957 emit(MUL(result
, op
[0], op
[1]));
960 nir_const_value
*value0
= nir_src_as_const_value(instr
->src
[0].src
);
961 nir_const_value
*value1
= nir_src_as_const_value(instr
->src
[1].src
);
963 if (value0
&& value0
->u
[0] < (1 << 16)) {
965 emit(MUL(result
, op
[0], op
[1]));
967 emit(MUL(result
, op
[1], op
[0]));
970 } else if (value1
&& value1
->u
[0] < (1 << 16)) {
972 emit(MUL(result
, op
[1], op
[0]));
974 emit(MUL(result
, op
[0], op
[1]));
981 no16("SIMD16 explicit accumulator operands unsupported\n");
983 struct brw_reg acc
= retype(brw_acc_reg(dispatch_width
), result
.type
);
985 emit(MUL(acc
, op
[0], op
[1]));
986 emit(MACH(reg_null_d
, op
[0], op
[1]));
987 emit(MOV(result
, fs_reg(acc
)));
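      /* A hedged note on the accumulator sequence above: on these gens the
       * integer MUL appears to consume only the low 16 bits of one source,
       * so a full 32x32 multiply is done as MUL + MACH with the low 32 bits
       * of the product read back from the accumulator.  The operand swaps
       * earlier in this case put a known small (< 2^16) constant in the
       * source position whose high bits are ignored, letting a single MUL
       * suffice there.
       */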
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (dispatch_width == 16)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit(MUL(acc, op[0], op[1]));
      emit(MACH(result, op[0], op[1]));
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry: {
      if (dispatch_width == 16)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(ADDC(reg_null_ud, op[0], op[1]));
      emit(MOV(result, fs_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      if (dispatch_width == 16)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(SUBB(reg_null_ud, op[0], op[1]));
      emit(MOV(result, fs_reg(acc)));
      break;
   }

   case nir_op_umod:
      emit_math(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_L));
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE));
      break;

   case nir_op_feq:
   case nir_op_ieq:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z));
      break;

   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ));
      break;

   case nir_op_inot:
      if (brw->gen >= 8) {
         resolve_source_modifiers(&op[0]);
      }
      emit(NOT(result, op[0]));
      break;
   case nir_op_ixor:
      if (brw->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      emit(XOR(result, op[0], op[1]));
      break;
   case nir_op_ior:
      if (brw->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      emit(OR(result, op[0], op[1]));
      break;
   case nir_op_iand:
      if (brw->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      emit(AND(result, op[0], op[1]));
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
      emit(AND(result, op[0], fs_reg(1)));
      break;
   case nir_op_b2f:
      emit(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u)));
      break;

   case nir_op_f2b:
      emit(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
      break;
   case nir_op_i2b:
      emit(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_ftrunc:
      inst = emit(RNDZ(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      emit(RNDD(temp, op[0]));
      temp.negate = true;
      inst = emit(MOV(result, temp));
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = emit(RNDD(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = emit(FRC(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = emit(RNDE(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (brw->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L));
         inst = emit(SEL(result, op[0], op[1]));
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (brw->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE));
         inst = emit(SEL(result, op[0], op[1]));
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(result, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(result, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      emit(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ));
      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = emit(ADD(result, neg_result, fs_reg(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
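      /* For example (illustrative): FBH(0x00000002) returns 30, counting
       * from the MSB side; the predicated ADD computes 31 - 30 = 1, the
       * LSB-based index findMSB() expects.  For an input with no MSB (0, or
       * -1 in the signed variant) FBH returns 0xFFFFFFFF, the comparison is
       * false, and the -1 result is left as-is.
       */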
      break;
   }

   case nir_op_find_lsb:
      emit(FBL(result, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      emit(BFE(result, op[2], op[1], op[0]));
      break;
   case nir_op_bfm:
      emit(BFI1(result, op[0], op[1]));
      break;
   case nir_op_bfi:
      emit(BFI2(result, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      emit(SHL(result, op[0], op[1]));
      break;
   case nir_op_ishr:
      emit(ASR(result, op[0], op[1]));
      break;
   case nir_op_ushr:
      emit(SHR(result, op[0], op[1]));
      break;

   case nir_op_pack_half_2x16_split:
      emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = emit(MAD(result, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      inst = emit(SEL(result, op[1], op[2]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (brw->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      emit(AND(masked, result, fs_reg(1)));
      masked.negate = true;
      emit(MOV(retype(result, BRW_REGISTER_TYPE_D), masked));
   }
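   /* For example (illustrative): a 1-bit "true" (x == 1) becomes
    * -(1 & 1) == ~0 (0xffffffff), and x == 0 stays 0, matching the 0/~0
    * boolean representation the rest of the Gen4-5 backend expects.
    */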
}

fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   if (src.is_ssa) {
      assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
      nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
      fs_reg reg = vgrf(src.ssa->num_components);
      reg.type = BRW_REGISTER_TYPE_D;

      for (unsigned i = 0; i < src.ssa->num_components; ++i)
         emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));

      return reg;
   } else {
      fs_reg reg;
      if (src.reg.reg->is_global)
         reg = nir_globals[src.reg.reg->index];
      else
         reg = nir_locals[src.reg.reg->index];

      /* to avoid floating-point denorm flushing problems, set the type by
       * default to D - instructions that need floating point semantics will set
       * this to F if they need to
       */
      reg = retype(offset(reg, src.reg.base_offset), BRW_REGISTER_TYPE_D);
      if (src.reg.indirect) {
         reg.reladdr = new(mem_ctx) fs_reg();
         *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
                               BRW_REGISTER_TYPE_D);
      }

      return reg;
   }
}

fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   fs_reg reg;
   if (dest.reg.reg->is_global)
      reg = nir_globals[dest.reg.reg->index];
   else
      reg = nir_locals[dest.reg.reg->index];

   reg = offset(reg, dest.reg.base_offset);
   if (dest.reg.indirect) {
      reg.reladdr = new(mem_ctx) fs_reg();
      *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}

void
fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
      new_inst->dst = offset(new_inst->dst, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (inst->src[j].file == GRF)
            new_inst->src[j] = offset(new_inst->src[j], i);

      emit(new_inst);
   }
}

void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if: {
      /* We track our discarded pixels in f0.1.  By predicating on it, we can
       * update just the flag bits that aren't yet discarded.  If there's no
       * condition, we emit a CMP of g0 != g0, so all currently executing
       * channels will get turned off.
       */
      fs_inst *cmp;
      if (instr->intrinsic == nir_intrinsic_discard_if) {
         cmp = emit(CMP(reg_null_f, get_nir_src(instr->src[0]),
                        fs_reg(0), BRW_CONDITIONAL_Z));
      } else {
         fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
         cmp = emit(CMP(reg_null_f, some_reg, some_reg, BRW_CONDITIONAL_NZ));
      }
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;
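      /* Illustrative note: a register always compares equal to itself, so
       * the g0 != g0 CMP produces "false" for every live channel, clearing
       * its flag bit (i.e. discarding it), while the predicate keeps the
       * bits of already-discarded channels untouched.
       */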

      if (brw->gen >= 6) {
         emit_discard_jump();
      }
      break;
   }

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }

   case nir_intrinsic_load_front_face:
      emit(MOV(retype(dest, BRW_REGISTER_TYPE_D),
               *emit_frontfacing_interpolation()));
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base: {
      fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      assert(vertex_id.file != BAD_FILE);
      dest.type = vertex_id.type;
      emit(MOV(dest, vertex_id));
      break;
   }

   case nir_intrinsic_load_base_vertex: {
      fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      assert(base_vertex.file != BAD_FILE);
      dest.type = base_vertex.type;
      emit(MOV(dest, base_vertex));
      break;
   }

   case nir_intrinsic_load_instance_id: {
      fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      assert(instance_id.file != BAD_FILE);
      dest.type = instance_id.type;
      emit(MOV(dest, instance_id));
      break;
   }

   case nir_intrinsic_load_sample_mask_in: {
      fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
      assert(sample_mask_in.file != BAD_FILE);
      dest.type = sample_mask_in.type;
      emit(MOV(dest, sample_mask_in));
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      emit(MOV(dest, sample_pos));
      emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
      assert(sample_id.file != BAD_FILE);
      dest.type = sample_id.type;
      emit(MOV(dest, sample_id));
      break;
   }

   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      unsigned index = instr->const_index[0];

      fs_reg uniform_reg;
      if (index < num_direct_uniforms) {
         uniform_reg = fs_reg(UNIFORM, 0);
      } else {
         uniform_reg = fs_reg(UNIFORM, num_direct_uniforms);
         index -= num_direct_uniforms;
      }

      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = offset(retype(uniform_reg, dest.type), index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            index++;

            emit(MOV(dest, src));
            dest = offset(dest, 1);
         }
      }
      break;
   }

   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; the generator will select
          * a value from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         emit(ADD(surf_index, get_nir_src(instr->src[0]),
                  fs_reg(stage_prog_data->binding_table.ubo_start)))
            ->force_writemask_all = true;

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                      BRW_REGISTER_TYPE_D),
                  fs_reg(2)));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++)
            emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
                                            base_offset, vec4_offset + i));
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
              surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);
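            /* For example (illustrative): const_index[0] == 20 loads the
             * 16-byte-aligned chunk starting at byte 16, and
             * 20 % 16 / 4 == 1 smears component i from dword 1 + i of that
             * chunk.
             */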

            emit(MOV(dest, packed_consts));
            dest = offset(dest, 1);
         }
      }
      break;
   }

   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = offset(retype(nir_inputs, dest.type),
                                instr->const_index[0] + index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            index++;

            emit(MOV(dest, src));
            dest = offset(dest, 1);
         }
      }
      break;
   }

   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * with like we do for other inputs.  We have to do that because the way
    * we set up inputs doesn't allow us to use the already setup inputs for
    * interpolation.  At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io.  If the input isn't used, dead code cleans up and
    * everything works fine.  However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently.  If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array.  However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation.  In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      /* in SIMD16 mode, the pixel interpolator returns coords interleaved
       * 8 channels at a time, same as the barycentric coords presented in
       * the FS payload. this requires a bit of extra work to support.
       */
      no16("interpolate_at_* not yet supported in SIMD16 mode.");

      fs_reg dst_x = vgrf(2);
      fs_reg dst_y = offset(dst_x, 1);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload. in the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1;     /* one reg unless overriden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_x, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample id's */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_x, src,
                     fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
                        fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
               fs_reg itemp = vgrf(glsl_type::int_type);
               emit(MOV(itemp, temp));  /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */

               emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
                  ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
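               /* For example (illustrative): an offset of +0.5 scales to
                * 0.5 * 16 = 8, which doesn't fit in S0.4 (maximum +7/16 =
                * 0.4375) and would wrap to -8/16; the SEL clamps it to 7.
                */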
            }

            mlen = 2;
            inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_x, src,
                        fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      inst->regs_written = 2; /* 2 floats per slot returned */
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
         dest = offset(dest, 1);
      }
      break;
   }

   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg new_dest = offset(retype(nir_outputs, src.type),
                                     instr->const_index[0] + index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
            index++;

            emit(MOV(new_dest, src));
            src = offset(src, 1);
         }
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}

void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0, offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         tex_offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && brw->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         emit(ADD(sampler_reg, src, fs_reg(sampler)))
            ->force_writemask_all = true;
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms) {
      if (brw->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         tex_offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
         break;
      }
   }

   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                tex_offset, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}

void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}