/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "brw_fs.h"
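
/* This file implements the NIR frontend for the i965 fragment shader
 * backend: it lowers the GLSL IR for a shader to NIR, runs the NIR
 * optimization and lowering passes, and then walks the resulting NIR,
 * emitting native fs_visitor instructions.
 */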
void
fs_visitor::emit_nir_code()
{
   /* first, lower the GLSL IR shader to NIR */
   lower_output_reads(shader->base.ir);
   nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
   nir_validate_shader(nir);
   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);
   bool progress;
   do {
      progress = false;

      nir_lower_variables(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_algebraic(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_constant_folding(nir);
      nir_validate_shader(nir);
   } while (progress);
   /* Lower a bunch of stuff */
   nir_lower_io(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   nir_lower_to_source_mods(nir);
   nir_validate_shader(nir);

   nir_convert_from_ssa(nir);
   nir_validate_shader(nir);
   nir_lower_vec_to_movs(nir);
   nir_validate_shader(nir);

   nir_lower_samplers(nir, shader_prog, shader->base.Program);
   nir_validate_shader(nir);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);
   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */

   if (nir->num_inputs > 0) {
      nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_uniforms = fs_reg(UNIFORM, 0);
      nir_setup_uniforms(nir);
   }
   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }
   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }

   ralloc_free(nir);
}
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   fs_reg varying = nir_inputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->inputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      varying.reg_offset = var->data.driver_location;

      fs_reg reg;
      if (!strcmp(var->name, "gl_FragCoord")) {
         reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                             var->data.origin_upper_left);
         emit_percomp(MOV(varying, reg), 0xF);
      } else if (!strcmp(var->name, "gl_FrontFacing")) {
         reg = *emit_frontfacing_interpolation();
         emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
      } else {
         emit_general_interpolation(varying, var->name, var->type,
                                    (glsl_interp_qualifier) var->data.interpolation,
                                    var->data.location, var->data.centroid,
                                    var->data.sample);
      }
   }
}
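
/* Point the visitor's output registers (color regions, dual-source color,
 * depth, and sample mask) at the right offsets within the nir_outputs
 * array, based on each output variable's location.
 */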
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   fs_reg reg = nir_outputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->outputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      reg.reg_offset = var->data.driver_location;

      if (var->data.index > 0) {
         assert(var->data.location == FRAG_RESULT_DATA0);
         assert(var->data.index == 1);
         this->dual_src_output = reg;
         this->do_dual_src = true;
      } else if (var->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = reg;
            this->output_components[i] = 4;
         }
      } else if (var->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = reg;
      } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(var->data.location >= FRAG_RESULT_DATA0 &&
                var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         int vector_elements =
            var->type->is_array() ? var->type->fields.array->vector_elements
                                  : var->type->vector_elements;

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
            int output = var->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = reg;
            this->outputs[output].reg_offset += vector_elements * i;
            this->output_components[output] = vector_elements;
         }
      }
   }
}
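
/* Fill out stage_prog_data->param so every uniform slot points at its
 * backing storage. Built-in (gl_*) uniforms are driver state and are
 * handled separately from user-defined ones.
 */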
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;
   param_size[0] = shader->num_uniforms;

   if (dispatch_width != 8)
      return;

   struct hash_entry *entry;
   hash_table_foreach(shader->uniforms, entry) {
      nir_variable *var = (nir_variable *) entry->data;

      /* UBOs and atomics don't take up space in the uniform file */
      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (strncmp(var->name, "gl_", 3) == 0)
         nir_setup_builtin_uniform(var);
      else
         nir_setup_uniform(var);
   }
}
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name. We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}
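
/* Built-in uniforms are backed by driver state slots that ir_to_mesa has
 * already registered with the parameter list; look each index back up and
 * record a pointer for every unique swizzle component.
 */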
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   /* first, put the condition into f0 */
   fs_inst *inst = emit(MOV(reg_null_d,
                            retype(get_nir_src(if_stmt->condition),
                                   BRW_REGISTER_TYPE_UD)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);

   try_replace_with_sel();
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_bool:
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
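
/* Emit native code for one NIR ALU instruction. This runs on non-SSA NIR,
 * so instructions still carry a destination writemask; most cases go
 * through the emit_percomp() helpers, which replicate the operation once
 * per enabled component. When the instruction is predicated, the result is
 * built in a temporary and predicated MOVs copy it into the real
 * destination at the end.
 */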
void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;

   fs_reg op[4];
   fs_reg dest = get_nir_dest(instr->dest.dest);
   dest.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg result;
   if (instr->has_predicate) {
      result = fs_reg(GRF, virtual_grf_alloc(4));
      result.type = dest.type;
   } else {
      result = dest;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      op[i] = get_nir_alu_src(instr, i);

   switch (instr->op) {
   case nir_op_fmov:
   case nir_op_i2f:
   case nir_op_u2f: {
      fs_inst *inst = MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_imov:
   case nir_op_f2i:
   case nir_op_f2u:
      emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
      break;
   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
                   instr->dest.write_mask);

      fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      if (instr->dest.saturate) {
         fs_inst *inst = MOV(result, result);
         inst->saturate = true;
         emit_percomp(inst, instr->dest.write_mask);
      }
      break;
   }
   case nir_op_isign: {
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
                   instr->dest.write_mask);

      emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);

      fs_inst *inst = OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_frcp:
      emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp2:
      emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_flog2:
      emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp:
   case nir_op_flog:
      unreachable("not reached: should be handled by ir_explog_to_explog2");

   case nir_op_fsin:
   case nir_op_fsin_reduced:
      emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fcos:
   case nir_op_fcos_reduced:
      emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_fine:
      emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_coarse:
      emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_fine:
      emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_coarse:
      emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fadd:
   case nir_op_iadd: {
      fs_inst *inst = ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fmul: {
      fs_inst *inst = MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_imul: {
      /* TODO put in the 16-bit constant optimization once we have SSA */

      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }
   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
                        instr->dest.write_mask);
      break;

   case nir_op_uadd_carry: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_usub_borrow: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_umod:
      emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
                        op[1], instr->dest.write_mask);
      break;
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
                   instr->dest.write_mask);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
                   instr->dest.write_mask);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
                   instr->dest.write_mask);
      break;

   case nir_op_fne:
   case nir_op_ine:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      temp.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
      break;
   }

   case nir_op_inot:
      emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
      break;
   case nir_op_ixor:
      emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ior:
      emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_iand:
      emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
      if (instr->dest.saturate) {
         fs_inst *inst = emit(MOV(result, result));
         inst->saturate = true;
      }
      break;
   }

   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
      break;
   }

   case nir_op_ball2:
   case nir_op_ball3:
   case nir_op_ball4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
      break;
   }
   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_quadop_vector");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");
   case nir_op_fsqrt:
      emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_frsq:
      emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_b2i:
      emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
      break;
   case nir_op_b2f:
      emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
                       fs_reg(0x3f800000u)),
                   instr->dest.write_mask);
      break;

   case nir_op_f2b:
      emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_i2b:
      emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_ftrunc: {
      fs_inst *inst = RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = fs_reg(this, glsl_type::vec4_type);
      emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
      temp.negate = true;
      fs_inst *inst = MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_ffloor: {
      fs_inst *inst = RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_ffract: {
      fs_inst *inst = FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_fround_even: {
      fs_inst *inst = RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_unpack_half_2x16_split_y:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fpow:
      emit_math_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
                        instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_bitfield_reverse:
      emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_bit_count:
      emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
                   instr->dest.write_mask);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      fs_reg neg_result(result);
      neg_result.negate = true;
      fs_inst *inst = ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_find_lsb:
      emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;
   case nir_op_bfm:
      emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_bfi:
      emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");
   case nir_op_ishl:
      emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ishr:
      emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ushr:
      emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
      break;

   case nir_op_pack_half_2x16_split:
      emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
                   instr->dest.write_mask);
      break;

   case nir_op_ffma:
      emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;
   case nir_op_flrp:
      /* TODO emulate for gen < 6 */
      emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;

   case nir_op_bcsel:
      for (unsigned i = 0; i < 4; i++) {
         if (!((instr->dest.write_mask >> i) & 1))
            continue;

         emit(CMP(reg_null_d, offset(op[0], i), fs_reg(0), BRW_CONDITIONAL_NZ));
         emit(SEL(offset(result, i), offset(op[1], i), offset(op[2], i)))
            ->predicate = BRW_PREDICATE_NORMAL;
      }
      break;
   default:
      unreachable("unhandled instruction");
   }

   /* emit a predicated move if there was predication */
   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = MOV(dest, result);
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
   }
}
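
/* Resolve a NIR source to an fs_reg. SSA sources are only expected here
 * for load_const instructions (immediates); everything else is a NIR
 * register, looked up in the global or local register file and given a
 * default integer type to avoid denorm flushing issues.
 */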
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   if (src.is_ssa) {
      assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
      nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
      fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
                 BRW_REGISTER_TYPE_D);

      for (unsigned i = 0; i < src.ssa->num_components; ++i)
         emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));

      return reg;
   } else {
      fs_reg reg;
      if (src.reg.reg->is_global)
         reg = nir_globals[src.reg.reg->index];
      else
         reg = nir_locals[src.reg.reg->index];

      /* to avoid floating-point denorm flushing problems, set the type by
       * default to D - instructions that need floating point semantics will set
       * this to F if they need to
       */
      reg.type = BRW_REGISTER_TYPE_D;
      reg.reg_offset = src.reg.base_offset;
      if (src.reg.indirect) {
         reg.reladdr = new(mem_ctx) fs_reg();
         *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
                               BRW_REGISTER_TYPE_D);
      }

      return reg;
   }
}
fs_reg
fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
{
   fs_reg reg = get_nir_src(instr->src[src].src);

   reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
   reg.abs = instr->src[src].abs;
   reg.negate = instr->src[src].negate;

   bool needs_swizzle = false;
   unsigned num_components = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_alu_instr_channel_used(instr, src, i))
         continue;

      if (instr->src[src].swizzle[i] != i)
         needs_swizzle = true;

      num_components = i + 1;
   }

   if (needs_swizzle) {
      /* resolve the swizzle through MOV's */
      fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components), reg.type);

      for (unsigned i = 0; i < 4; i++) {
         if (!nir_alu_instr_channel_used(instr, src, i))
            continue;

         emit(MOV(offset(new_reg, i),
                  offset(reg, instr->src[src].swizzle[i])));
      }

      return new_reg;
   }

   return reg;
}
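
/* Resolve a NIR destination to an fs_reg, including indirect (reladdr)
 * addressing. Unlike get_nir_src(), this does not force a register type.
 */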
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   fs_reg reg;
   if (dest.reg.reg->is_global)
      reg = nir_globals[dest.reg.reg->index];
   else
      reg = nir_locals[dest.reg.reg->index];

   reg.reg_offset = dest.reg.base_offset;
   if (dest.reg.indirect) {
      reg.reladdr = new(mem_ctx) fs_reg();
      *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}
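
/* The emit_percomp() helpers expand one logical vector operation into a
 * scalar instruction per writemask-enabled component, bumping the
 * destination (and any GRF source) reg_offset by the component index.
 */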
void
fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      emit(new_inst);
   }
}
void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                         unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}
void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0, fs_reg src1,
                         unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}
void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              unsigned wr_mask, bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0);
      new_inst->saturate = saturate;
   }
}
void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              fs_reg src1, unsigned wr_mask,
                              bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;
      fs_reg new_src1 = src1;
      if (src1.file == GRF)
         new_src1.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
      new_inst->saturate = saturate;
   }
}
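
/* Reduce the first num_components components of src into dest by combining
 * them pairwise with the given binary opcode; used above for dot products
 * and the ball/bany operations.
 */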
void
fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
                           unsigned num_components)
{
   fs_reg src0 = src;
   fs_reg src1 = src;
   src1.reg_offset++;

   if (num_components == 2) {
      emit(op, dest, src0, src1);
      return;
   }

   fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
   temp1.type = src.type;
   emit(op, temp1, src0, src1);

   fs_reg src2 = src;
   src2.reg_offset += 2;

   if (num_components == 3) {
      emit(op, dest, temp1, src2);
      return;
   }

   assert(num_components == 4);

   fs_reg src3 = src;
   src3.reg_offset += 3;
   fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
   temp2.type = src.type;

   emit(op, temp2, src2, src3);
   emit(op, dest, temp1, temp2);
}
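
/* Emit native code for a NIR intrinsic: discard, atomic counters, system
 * values, uniform/UBO/input loads, output stores, and the ARB_gpu_shader5
 * interpolation intrinsics.
 */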
void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }

   bool has_indirect = false;
   switch (instr->intrinsic) {
   case nir_intrinsic_discard: {
      /* We track our discarded pixels in f0.1. By predicating on it, we can
       * update just the flag bits that aren't yet discarded. By emitting a
       * CMP of g0 != g0, all our currently executing channels will get turned
       * off.
       */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
                              BRW_CONDITIONAL_NZ));
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (brw->gen >= 6) {
         /* For performance, after a discard, jump to the end of the shader.
          * Only jump if all relevant channels have been discarded.
          */
         fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
         discard_jump->flag_subreg = 1;

         discard_jump->predicate = (dispatch_width == 8)
                                   ? BRW_PREDICATE_ALIGN1_ANY8H
                                   : BRW_PREDICATE_ALIGN1_ANY16H;
         discard_jump->predicate_inverse = true;
      }

      break;
   }
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }
   case nir_intrinsic_load_front_face:
      assert(!"TODO");

   case nir_intrinsic_load_sample_mask_in: {
      assert(brw->gen >= 7);
      fs_reg reg = fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
      dest.type = reg.type;
      fs_inst *inst = MOV(dest, reg);
      if (instr->has_predicate)
         inst->predicate = BRW_PREDICATE_NORMAL;
      emit(inst);
      break;
   }
   case nir_intrinsic_load_sample_pos: {
      fs_reg *reg = emit_samplepos_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      emit(MOV(offset(dest, 1), offset(*reg, 1)));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg *reg = emit_sampleid_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      break;
   }
   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
   case nir_intrinsic_load_uniform: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = nir_uniforms;
            src.reg_offset = instr->const_index[0] + index;
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }
   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; the generator will select
          * a value from any live channel.
          */
         surf_index = fs_reg(this, glsl_type::uint_type);
         emit(ADD(surf_index, get_nir_src(instr->src[0]),
                  fs_reg(stage_prog_data->binding_table.ubo_start)))
            ->force_writemask_all = true;

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = fs_reg(this, glsl_type::int_type);
         emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                      BRW_REGISTER_TYPE_D),
                  fs_reg(2)));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++) {
            exec_list list = VARYING_PULL_CONSTANT_LOAD(offset(dest, i),
                                                        surf_index, base_offset,
                                                        vec4_offset + i);

            fs_inst *last_inst = (fs_inst *) list.get_tail();
            if (instr->has_predicate)
               last_inst->predicate = BRW_PREDICATE_NORMAL;
            emit(list);
         }
      } else {
         fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
              surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            fs_inst *inst = MOV(dest, packed_consts);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            dest.reg_offset++;
         }
      }
      break;
   }
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = nir_inputs;
            src.reg_offset = instr->const_index[0] + index;
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.type = dest.type;
            index++;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            dest.reg_offset++;
         }
      }
      break;
   }
   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * like we do for other inputs. We have to do that because the way we set
    * up inputs doesn't allow us to use the already-set-up inputs for
    * interpolation. At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io. If the input isn't used, dead code cleans up and
    * everything works fine. However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently. If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array. However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation. In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      /* in SIMD16 mode, the pixel interpolator returns coords interleaved
       * 8 channels at a time, same as the barycentric coords presented in
       * the FS payload. This requires a bit of extra work to support.
       */
      no16("interpolate_at_* not yet supported in SIMD16 mode.");

      fs_reg dst_x(GRF, virtual_grf_alloc(2), BRW_REGISTER_TYPE_F);
      fs_reg dst_y = offset(dst_x, 1);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload. In the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src(this, glsl_type::float_type);
      int mlen = 1; /* one reg unless overridden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_x, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample ids */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_x, src,
                     fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
                        fs_reg(off_x | (off_y << 4)));
         } else {
            src = fs_reg(this, glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp(this, glsl_type::float_type);
               emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
               fs_reg itemp(this, glsl_type::int_type);
               emit(MOV(itemp, temp));  /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */

               emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
                  ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
            }

            mlen = 2;
            inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_x, src,
                        fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      inst->regs_written = 2; /* 2 floats per slot returned */
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         fs_inst *inst = emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
         if (instr->has_predicate)
            inst->predicate = BRW_PREDICATE_NORMAL;
         dest.reg_offset++;
      }
      break;
   }
   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg new_dest = nir_outputs;
            new_dest.reg_offset = instr->const_index[0] + index;
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
            new_dest.type = src.type;
            index++;

            fs_inst *inst = MOV(new_dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);
            src.reg_offset++;
         }
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}
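
/* Emit a NIR texture instruction by gathering its sources into the form
 * the backend's shared emit_texture() helper expects and mapping the NIR
 * texture opcode back onto the corresponding ir_texture_opcode.
 */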
void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated. This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0, offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i]);
      switch (instr->src_type[i]) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && brw->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = fs_reg(this, glsl_type::uint_type);
         emit(ADD(sampler_reg, src, fs_reg(sampler)))
            ->force_writemask_all = true;
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms) {
      if (brw->gen >= 7 &&
          key->tex.compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }
   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
         break;
      }
   }

   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                offset, offset_components, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}
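
/* Non-SSA constant loads write immediates into a NIR register, one MOV per
 * component (and per array element). SSA constant loads are skipped here;
 * they are materialized at their uses by get_nir_src().
 */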
void
fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   /* Bail on SSA constant loads. These are used for immediates. */
   if (instr->dest.is_ssa)
      return;

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = BRW_REGISTER_TYPE_UD;
   if (instr->array_elems == 0) {
      for (unsigned i = 0; i < instr->num_components; i++) {
         emit(MOV(dest, fs_reg(instr->value.u[i])));
         dest.reg_offset++;
      }
   } else {
      for (unsigned i = 0; i < instr->array_elems; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            emit(MOV(dest, fs_reg(instr->array[i].u[j])));
            dest.reg_offset++;
         }
      }
   }
}
void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}