/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "brw_fs.h"
void
fs_visitor::emit_nir_code()
{
   /* first, lower the GLSL IR shader to NIR */
   lower_output_reads(shader->base.ir);
   nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
   nir_validate_shader(nir);

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

   bool progress;
   do {
      progress = false;

      nir_lower_variables(nir);
      nir_validate_shader(nir);

      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_ffma(nir);
      nir_validate_shader(nir);
   } while (progress);

   /* Lower a bunch of stuff */
   nir_lower_io(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   nir_convert_from_ssa(nir);
   nir_validate_shader(nir);

   nir_lower_vec_to_movs(nir);
   nir_validate_shader(nir);

   nir_lower_samplers(nir, shader_prog, shader->base.Program);
   nir_validate_shader(nir);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);
   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */

   if (nir->num_inputs > 0) {
      nir_inputs = fs_reg(GRF, virtual_grf_alloc(nir->num_inputs));
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = fs_reg(GRF, virtual_grf_alloc(nir->num_outputs));
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_uniforms = fs_reg(UNIFORM, 0);
      nir_setup_uniforms(nir);
   }

   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }

   ralloc_free(nir);
}
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   fs_reg varying = nir_inputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->inputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      varying.reg_offset = var->data.driver_location;

      fs_reg reg;
      if (!strcmp(var->name, "gl_FragCoord")) {
         reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                             var->data.origin_upper_left);
         emit_percomp(MOV(varying, reg), 0xF);
      } else if (!strcmp(var->name, "gl_FrontFacing")) {
         reg = *emit_frontfacing_interpolation();
         emit(MOV(retype(varying, BRW_REGISTER_TYPE_UD), reg));
      } else {
         emit_general_interpolation(varying, var->name, var->type,
                                    (glsl_interp_qualifier) var->data.interpolation,
                                    var->data.location, var->data.centroid,
                                    var->data.sample);
      }
   }
}
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   fs_reg reg = nir_outputs;

   struct hash_entry *entry;
   hash_table_foreach(shader->outputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      reg.reg_offset = var->data.driver_location;

      if (var->data.index > 0) {
         assert(var->data.location == FRAG_RESULT_DATA0);
         assert(var->data.index == 1);
         this->dual_src_output = reg;
         this->do_dual_src = true;
      } else if (var->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = reg;
            this->output_components[i] = 4;
         }
      } else if (var->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = reg;
      } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(var->data.location >= FRAG_RESULT_DATA0 &&
                var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         int vector_elements =
            var->type->is_array() ? var->type->fields.array->vector_elements
                                  : var->type->vector_elements;

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
            int output = var->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = reg;
            this->outputs[output].reg_offset += vector_elements * i;
            this->output_components[output] = vector_elements;
         }
      }
   }
}
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;
   param_size[0] = shader->num_uniforms;

   if (dispatch_width != 8)
      return;

   struct hash_entry *entry;
   hash_table_foreach(shader->uniforms, entry) {
      nir_variable *var = (nir_variable *) entry->data;

      /* UBOs and atomics don't take up space in the uniform file */

      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (strncmp(var->name, "gl_", 3) == 0)
         nir_setup_builtin_uniform(var);
      else
         nir_setup_uniform(var);
   }
}
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name. We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}
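
/* A worked example of the prefix match above (illustrative, not from the
 * original source): for a uniform named "color" (namelen == 5), storage
 * entries "color", "color[0]" and "color.x" all match, because the character
 * following the prefix is '\0', '[' or '.'.  An unrelated entry such as
 * "colorful" is skipped, since its next character is 'f'.
 */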
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
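
/* Sketch of the swizzle walk above (illustrative): a scalar state slot such
 * as gl_Fog.density typically carries a replicated swizzle (XXXX), so
 * GET_SWZ returns the same component for j == 1 as for j == 0 and the loop
 * breaks after adding a single parameter.  A vec4 slot with swizzle XYZW
 * yields four distinct components and therefore four parameters.
 */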
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = fs_reg(GRF, virtual_grf_alloc(size));
   }

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   if (brw->gen < 6)
      no16("Can't support (non-uniform) control flow on SIMD16\n");

   /* first, put the condition into f0 */
   fs_inst *inst = emit(MOV(reg_null_d,
                            retype(get_nir_src(if_stmt->condition),
                                   BRW_REGISTER_TYPE_UD)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);

   try_replace_with_sel();
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   if (brw->gen < 6)
      no16("Can't support (non-uniform) control flow on SIMD16\n");

   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_texture:
      nir_emit_texture(nir_instr_as_texture(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_bool:
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;

   fs_reg dest = get_nir_dest(instr->dest.dest);
   dest.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg result;
   if (instr->has_predicate) {
      result = fs_reg(GRF, virtual_grf_alloc(4));
      result.type = dest.type;
   } else {
      result = dest;
   }

   fs_reg op[3];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      op[i] = get_nir_alu_src(instr, i);

   switch (instr->op) {
   case nir_op_fmov:
   case nir_op_i2f:
   case nir_op_u2f: {
      fs_inst *inst = MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_imov:
   case nir_op_f2i:
   case nir_op_f2u:
      emit_percomp(MOV(result, op[0]), instr->dest.write_mask);
      break;
   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit_percomp(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(AND(result_int, op[0], fs_reg(0x80000000u)),
                   instr->dest.write_mask);

      fs_inst *inst = OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      if (instr->dest.saturate) {
         fs_inst *inst = MOV(result, result);
         inst->saturate = true;
         emit_percomp(inst, instr->dest.write_mask);
      }
      break;
   }
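
   /* Worked example of the fsign bit trick (illustrative): -2.5f has the bit
    * pattern 0xc0200000; AND with 0x80000000 leaves 0x80000000, and the
    * predicated OR with 1.0f (0x3f800000) yields 0xbf800000 == -1.0f.  For
    * +2.5f (0x40200000) the AND leaves 0 and the OR yields 0x3f800000 == 1.0f.
    * For 0.0f the CMP leaves the predicate clear, the OR is skipped, and the
    * result stays 0.0f.
    */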
   case nir_op_isign: {
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit_percomp(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G),
                   instr->dest.write_mask);

      emit_percomp(ASR(result, op[0], fs_reg(31)), instr->dest.write_mask);

      fs_inst *inst = OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
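
   /* Illustrative values for the isign sequence: for op[0] == -5, ASR by 31
    * yields 0xffffffff == -1 and the CMP predicate is false (-5 is not > 0),
    * so the OR is skipped.  For op[0] == 7, ASR yields 0 and the predicated
    * OR sets the low bit, giving 1.  For 0, both steps leave the result at 0.
    */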
   case nir_op_frcp:
      emit_math_percomp(SHADER_OPCODE_RCP, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp2:
      emit_math_percomp(SHADER_OPCODE_EXP2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_flog2:
      emit_math_percomp(SHADER_OPCODE_LOG2, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fexp:
   case nir_op_flog:
      unreachable("not reached: should be handled by ir_explog_to_explog2");

   case nir_op_fsin:
   case nir_op_fsin_reduced:
      emit_math_percomp(SHADER_OPCODE_SIN, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fcos:
   case nir_op_fcos_reduced:
      emit_math_percomp(SHADER_OPCODE_COS, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_fine:
      emit_percomp(FS_OPCODE_DDX_FINE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddx_coarse:
      emit_percomp(FS_OPCODE_DDX_COARSE, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives)
         emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      else
         emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo),
                      instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_fine:
      emit_percomp(FS_OPCODE_DDY_FINE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fddy_coarse:
      emit_percomp(FS_OPCODE_DDY_COARSE, result, op[0],
                   fs_reg(fs_key->render_to_fbo),
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_fadd:
   case nir_op_iadd: {
      fs_inst *inst = ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fmul: {
      fs_inst *inst = MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_imul: {
      /* TODO put in the 16-bit constant optimization once we have SSA */

      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(reg_null_d, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit_percomp(MUL(acc, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MACH(result, op[0], op[1]), instr->dest.write_mask);
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math_percomp(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1],
                        instr->dest.write_mask);
      break;

   case nir_op_uadd_carry: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(ADDC(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_usub_borrow: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit_percomp(SUBB(reg_null_ud, op[0], op[1]), instr->dest.write_mask);
      emit_percomp(MOV(result, fs_reg(acc)), instr->dest.write_mask);
      break;
   }

   case nir_op_umod:
      emit_math_percomp(SHADER_OPCODE_INT_REMAINDER, result, op[0],
                        op[1], instr->dest.write_mask);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_L),
                   instr->dest.write_mask);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE),
                   instr->dest.write_mask);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z),
                   instr->dest.write_mask);
      break;

   case nir_op_fne:
   case nir_op_ine:
      emit_percomp(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_Z),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_AND, result, temp, num_components);
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      temp.type = BRW_REGISTER_TYPE_UD;
      emit_percomp(CMP(temp, op[0], op[1], BRW_CONDITIONAL_NZ),
                   (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_OR, result, temp, num_components);
      break;
   }
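
   /* e.g. ball_fequal3: the per-component CMP above writes ~0 or 0 into the
    * three components of temp, and emit_reduction() then ANDs them together,
    * so the scalar result is ~0 only if every component compared equal; the
    * bany variants OR instead, so any mismatching component sets the result.
    */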
   case nir_op_inot:
      emit_percomp(NOT(result, op[0]), instr->dest.write_mask);
      break;
   case nir_op_ixor:
      emit_percomp(XOR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ior:
      emit_percomp(OR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_iand:
      emit_percomp(AND(result, op[0], op[1]), instr->dest.write_mask);
      break;

   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      fs_reg temp = fs_reg(GRF, virtual_grf_alloc(num_components));
      emit_percomp(MUL(temp, op[0], op[1]), (1 << num_components) - 1);
      emit_reduction(BRW_OPCODE_ADD, result, temp, num_components);
      if (instr->dest.saturate) {
         fs_inst *inst = emit(MOV(result, result));
         inst->saturate = true;
      }
      break;
   }
   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_OR, result, op[0], num_components);
      break;
   }

   case nir_op_ball2:
   case nir_op_ball3:
   case nir_op_ball4: {
      unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
      emit_reduction(BRW_OPCODE_AND, result, op[0], num_components);
      break;
   }

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_quadop_vector");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");
   case nir_op_fsqrt:
      emit_math_percomp(SHADER_OPCODE_SQRT, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_frsq:
      emit_math_percomp(SHADER_OPCODE_RSQ, result, op[0],
                        instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_b2i:
      emit_percomp(AND(result, op[0], fs_reg(1)), instr->dest.write_mask);
      break;
   case nir_op_b2f:
      emit_percomp(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0],
                       fs_reg(0x3f800000u)),
                   instr->dest.write_mask);
      break;
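
   /* NIR booleans are 0 or ~0 (0xffffffff), so b2i just ANDs with 1 to get
    * the integer 0 or 1, and b2f ANDs with 0x3f800000, mapping false to
    * 0x00000000 == 0.0f and true to 0x3f800000 == 1.0f.
    */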
   case nir_op_f2b:
      emit_percomp(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;
   case nir_op_i2b:
      emit_percomp(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      break;

   case nir_op_ftrunc: {
      fs_inst *inst = RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = fs_reg(this, glsl_type::vec4_type);
      emit_percomp(RNDD(temp, op[0]), instr->dest.write_mask);
      temp.negate = true;
      fs_inst *inst = MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
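
   /* The fceil case relies on the identity ceil(x) == -floor(-x), built from
    * the round-down instruction: e.g. ceil(1.3) becomes -RNDD(-1.3) ==
    * -(-2.0) == 2.0, with the final negation applied via temp.negate.
    */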
   case nir_op_ffloor: {
      fs_inst *inst = RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_ffract: {
      fs_inst *inst = FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }

   case nir_op_fround_even: {
      fs_inst *inst = RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_L);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (brw->gen >= 6) {
         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NONE, BRW_CONDITIONAL_GE);
      } else {
         emit_percomp(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE),
                      instr->dest.write_mask);

         emit_percomp(BRW_OPCODE_SEL, result, op[0], op[1],
                      instr->dest.write_mask, instr->dest.saturate,
                      BRW_PREDICATE_NORMAL);
      }
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_unpack_half_2x16_split_y:
      emit_percomp(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0],
                   instr->dest.write_mask, instr->dest.saturate);
      break;

   case nir_op_fpow:
      emit_percomp(SHADER_OPCODE_POW, result, op[0], op[1],
                   instr->dest.write_mask, instr->dest.saturate);
      break;
   case nir_op_bitfield_reverse:
      emit_percomp(BFREV(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_bit_count:
      emit_percomp(CBIT(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit_percomp(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]),
                   instr->dest.write_mask);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      emit_percomp(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ),
                   instr->dest.write_mask);
      fs_reg neg_result(result);
      neg_result.negate = true;
      fs_inst *inst = ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
      break;
   }
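
   /* Worked example (illustrative): for input 0x00000001, FBH scans from the
    * MSB and returns 31, and the predicated ADD computes 31 - 31 == 0, the
    * LSB-based index findMSB() expects.  For 0x80000000 (ufind_msb), FBH
    * returns 0 and the ADD gives 31.  When no bits are set, FBH returns
    * 0xffffffff, the CMP predicate stays false, and the error value passes
    * through unchanged.
    */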
   case nir_op_find_lsb:
      emit_percomp(FBL(result, op[0]), instr->dest.write_mask);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      emit_percomp(BFE(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;
   case nir_op_bfm:
      emit_percomp(BFI1(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_bfi:
      emit_percomp(BFI2(result, op[0], op[1], op[2]), instr->dest.write_mask);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      emit_percomp(SHL(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ishr:
      emit_percomp(ASR(result, op[0], op[1]), instr->dest.write_mask);
      break;
   case nir_op_ushr:
      emit_percomp(SHR(result, op[0], op[1]), instr->dest.write_mask);
      break;

   case nir_op_pack_half_2x16_split:
      emit_percomp(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1],
                   instr->dest.write_mask);
      break;

   case nir_op_ffma:
      emit_percomp(MAD(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;
   case nir_op_flrp:
      /* TODO emulate for gen < 6 */
      emit_percomp(LRP(result, op[2], op[1], op[0]), instr->dest.write_mask);
      break;

   case nir_op_bcsel:
      emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      emit_percomp(BRW_OPCODE_SEL, result, op[1], op[2],
                   instr->dest.write_mask, false, BRW_PREDICATE_NORMAL);
      break;
   default:
      unreachable("unhandled instruction");
   }

   /* emit a predicated move if there was predication */
   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = MOV(dest, result);
      inst->predicate = BRW_PREDICATE_NORMAL;
      emit_percomp(inst, instr->dest.write_mask);
   }
}
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   if (src.is_ssa) {
      assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
      nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
      fs_reg reg(GRF, virtual_grf_alloc(src.ssa->num_components),
                 BRW_REGISTER_TYPE_D);

      for (unsigned i = 0; i < src.ssa->num_components; ++i)
         emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));

      return reg;
   } else {
      fs_reg reg;
      if (src.reg.reg->is_global)
         reg = nir_globals[src.reg.reg->index];
      else
         reg = nir_locals[src.reg.reg->index];

      /* to avoid floating-point denorm flushing problems, set the type by
       * default to D - instructions that need floating point semantics will set
       * this to F if they need to
       */
      reg.type = BRW_REGISTER_TYPE_D;
      reg.reg_offset = src.reg.base_offset;
      if (src.reg.indirect) {
         reg.reladdr = new(mem_ctx) fs_reg();
         *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
                               BRW_REGISTER_TYPE_D);
      }

      return reg;
   }
}
fs_reg
fs_visitor::get_nir_alu_src(nir_alu_instr *instr, unsigned src)
{
   fs_reg reg = get_nir_src(instr->src[src].src);

   reg.type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[src]);
   reg.abs = instr->src[src].abs;
   reg.negate = instr->src[src].negate;

   bool needs_swizzle = false;
   unsigned num_components = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!nir_alu_instr_channel_used(instr, src, i))
         continue;

      if (instr->src[src].swizzle[i] != i)
         needs_swizzle = true;

      num_components = i + 1;
   }

   if (needs_swizzle) {
      /* resolve the swizzle through MOV's */
      fs_reg new_reg = fs_reg(GRF, virtual_grf_alloc(num_components), reg.type);

      for (unsigned i = 0; i < 4; i++) {
         if (!nir_alu_instr_channel_used(instr, src, i))
            continue;

         emit(MOV(offset(new_reg, i),
                  offset(reg, instr->src[src].swizzle[i])));
      }

      return new_reg;
   }

   return reg;
}
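
/* e.g. a source swizzled .yyxx: channels 0-3 map to swizzle indices 1, 1,
 * 0, 0, so the loop above emits MOVs from reg components 1, 1, 0 and 0 into
 * new_reg components 0-3, and the ALU instruction then reads the
 * straight-line new_reg instead of the swizzled source.
 */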
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   fs_reg reg;
   if (dest.reg.reg->is_global)
      reg = nir_globals[dest.reg.reg->index];
   else
      reg = nir_locals[dest.reg.reg->index];

   reg.reg_offset = dest.reg.base_offset;
   if (dest.reg.indirect) {
      reg.reladdr = new(mem_ctx) fs_reg();
      *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}
void
fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      emit(new_inst);
   }
}
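
/* Usage sketch: emit_percomp(MOV(dst, src), 0x5) emits two scalar copies,
 * at reg_offset + 0 and reg_offset + 2, because only bits 0 and 2 of the
 * write mask are set; GRF sources are stepped by the same offset so each
 * copy reads the matching component.
 */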
void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                         unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}
void
fs_visitor::emit_percomp(enum opcode op, fs_reg dest, fs_reg src0, fs_reg src1,
                         unsigned wr_mask, bool saturate,
                         enum brw_predicate predicate,
                         enum brw_conditional_mod mod)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(op, dest, src0, src1);
      new_inst->dst.reg_offset += i;
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j].reg_offset += i;

      new_inst->predicate = predicate;
      new_inst->conditional_mod = mod;
      new_inst->saturate = saturate;
      emit(new_inst);
   }
}
void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              unsigned wr_mask, bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0);
      new_inst->saturate = saturate;
   }
}
void
fs_visitor::emit_math_percomp(enum opcode op, fs_reg dest, fs_reg src0,
                              fs_reg src1, unsigned wr_mask,
                              bool saturate)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_reg new_dest = dest;
      new_dest.reg_offset += i;
      fs_reg new_src0 = src0;
      if (src0.file == GRF)
         new_src0.reg_offset += i;
      fs_reg new_src1 = src1;
      if (src1.file == GRF)
         new_src1.reg_offset += i;

      fs_inst *new_inst = emit_math(op, new_dest, new_src0, new_src1);
      new_inst->saturate = saturate;
   }
}
void
fs_visitor::emit_reduction(enum opcode op, fs_reg dest, fs_reg src,
                           unsigned num_components)
{
   fs_reg src0 = src;
   fs_reg src1 = src;
   src1.reg_offset++;

   if (num_components == 2) {
      emit(op, dest, src0, src1);
      return;
   }

   fs_reg temp1 = fs_reg(GRF, virtual_grf_alloc(1));
   temp1.type = src.type;
   emit(op, temp1, src0, src1);

   fs_reg src2 = src;
   src2.reg_offset += 2;

   if (num_components == 3) {
      emit(op, dest, temp1, src2);
      return;
   }

   assert(num_components == 4);

   fs_reg src3 = src;
   src3.reg_offset += 3;
   fs_reg temp2 = fs_reg(GRF, virtual_grf_alloc(1));
   temp2.type = src.type;

   emit(op, temp2, src2, src3);
   emit(op, dest, temp1, temp2);
}
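
/* The reduction above is a small tree: for four components,
 * dest = (s0 op s1) op (s2 op s3) using two one-register temporaries; for
 * three components, dest = (s0 op s1) op s2; for two, a single instruction.
 */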
void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);
   if (instr->has_predicate) {
      fs_inst *inst = emit(MOV(reg_null_d,
                               retype(get_nir_src(instr->predicate),
                                      BRW_REGISTER_TYPE_UD)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_discard: {
      /* We track our discarded pixels in f0.1. By predicating on it, we can
       * update just the flag bits that aren't yet discarded. By emitting a
       * CMP of g0 != g0, all our currently executing channels will get turned
       * off.
       */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
                              BRW_CONDITIONAL_NZ));
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (brw->gen >= 6) {
         /* For performance, after a discard, jump to the end of the shader.
          * Only jump if all relevant channels have been discarded.
          */
         fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
         discard_jump->flag_subreg = 1;

         discard_jump->predicate = (dispatch_width == 8)
                                   ? BRW_PREDICATE_ALIGN1_ANY8H
                                   : BRW_PREDICATE_ALIGN1_ANY16H;
         discard_jump->predicate_inverse = true;
      }
      break;
   }

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }
   case nir_intrinsic_load_front_face:
      assert(!"TODO");
      break;
   case nir_intrinsic_load_sample_mask_in: {
      assert(brw->gen >= 7);
      fs_reg reg = fs_reg(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
      dest.type = reg.type;
      fs_inst *inst = MOV(dest, reg);
      if (instr->has_predicate)
         inst->predicate = BRW_PREDICATE_NORMAL;
      emit(inst);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg *reg = emit_samplepos_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      emit(MOV(offset(dest, 1), offset(*reg, 1)));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg *reg = emit_sampleid_setup();
      dest.type = reg->type;
      emit(MOV(dest, *reg));
      break;
   }
   case nir_intrinsic_load_uniform_vec1:
   case nir_intrinsic_load_uniform_vec2:
   case nir_intrinsic_load_uniform_vec3:
   case nir_intrinsic_load_uniform_vec4: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components; j++) {
            fs_reg src = nir_uniforms;
            src.reg_offset = instr->const_index[0] + index;
            src.type = dest.type;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            dest.reg_offset++;
            index++;
         }
      }
      break;
   }

   case nir_intrinsic_load_uniform_vec1_indirect:
   case nir_intrinsic_load_uniform_vec2_indirect:
   case nir_intrinsic_load_uniform_vec3_indirect:
   case nir_intrinsic_load_uniform_vec4_indirect: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components; j++) {
            fs_reg src = nir_uniforms;
            src.reg_offset = instr->const_index[0] + index;
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.reladdr->type = BRW_REGISTER_TYPE_D;
            src.type = dest.type;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            dest.reg_offset++;
            index++;
         }
      }
      break;
   }
   case nir_intrinsic_load_ubo_vec1:
   case nir_intrinsic_load_ubo_vec2:
   case nir_intrinsic_load_ubo_vec3:
   case nir_intrinsic_load_ubo_vec4: {
      fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start +
                                 (unsigned) instr->const_index[0]);
      fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
      packed_consts.type = dest.type;

      fs_reg const_offset_reg = fs_reg((unsigned) instr->const_index[1] & ~15);
      emit(new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
                                packed_consts, surf_index, const_offset_reg));

      for (unsigned i = 0;
           i < nir_intrinsic_infos[instr->intrinsic].dest_components; i++) {
         packed_consts.set_smear(instr->const_index[1] % 16 / 4 + i);

         /* The std140 packing rules don't allow vectors to cross 16-byte
          * boundaries, and a reg is 32 bytes.
          */
         assert(packed_consts.subreg_offset < 32);

         fs_inst *inst = MOV(dest, packed_consts);
         if (instr->has_predicate)
            inst->predicate = BRW_PREDICATE_NORMAL;
         emit(inst);

         dest.reg_offset++;
      }
      break;
   }
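
   /* Worked example (illustrative): const_index[1] == 20 reads the 16-byte
    * aligned block at byte offset 16, and 20 % 16 / 4 == 1, so component i
    * of the destination is smeared from component 1 + i of packed_consts; a
    * vec2 at byte 20 therefore lands in components 1 and 2 of the pulled
    * constant.
    */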
   case nir_intrinsic_load_ubo_vec1_indirect:
   case nir_intrinsic_load_ubo_vec2_indirect:
   case nir_intrinsic_load_ubo_vec3_indirect:
   case nir_intrinsic_load_ubo_vec4_indirect: {
      fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start +
                                 instr->const_index[0]);
      /* Turn the byte offset into a dword offset. */
      unsigned base_offset = instr->const_index[1] / 4;
      fs_reg offset = fs_reg(this, glsl_type::int_type);
      emit(SHR(offset, retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_D),
               fs_reg(2)));

      for (unsigned i = 0;
           i < nir_intrinsic_infos[instr->intrinsic].dest_components; i++) {
         exec_list list = VARYING_PULL_CONSTANT_LOAD(dest, surf_index,
                                                     offset, base_offset + i);
         fs_inst *last_inst = (fs_inst *) list.get_tail();
         if (instr->has_predicate)
            last_inst->predicate = BRW_PREDICATE_NORMAL;
         emit(list);

         dest.reg_offset++;
      }
      break;
   }
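
   /* The SHR above converts the dynamic byte offset into the dword offset
    * the varying pull constant load expects (e.g. byte 28 becomes dword 7),
    * just as the constant part of the offset is divided by 4 for
    * base_offset.
    */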
   case nir_intrinsic_load_input_vec1:
   case nir_intrinsic_load_input_vec2:
   case nir_intrinsic_load_input_vec3:
   case nir_intrinsic_load_input_vec4: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components; j++) {
            fs_reg src = nir_inputs;
            src.reg_offset = instr->const_index[0] + index;
            src.type = dest.type;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            dest.reg_offset++;
            index++;
         }
      }
      break;
   }

   case nir_intrinsic_load_input_vec1_indirect:
   case nir_intrinsic_load_input_vec2_indirect:
   case nir_intrinsic_load_input_vec3_indirect:
   case nir_intrinsic_load_input_vec4_indirect: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].dest_components; j++) {
            fs_reg src = nir_inputs;
            src.reg_offset = instr->const_index[0] + index;
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            src.reladdr->type = BRW_REGISTER_TYPE_D;
            src.type = dest.type;

            fs_inst *inst = MOV(dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            dest.reg_offset++;
            index++;
         }
      }
      break;
   }
   case nir_intrinsic_store_output_vec1:
   case nir_intrinsic_store_output_vec2:
   case nir_intrinsic_store_output_vec3:
   case nir_intrinsic_store_output_vec4: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].src_components[0]; j++) {
            fs_reg new_dest = nir_outputs;
            new_dest.reg_offset = instr->const_index[0] + index;
            new_dest.type = src.type;

            fs_inst *inst = MOV(new_dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            src.reg_offset++;
            index++;
         }
      }
      break;
   }

   case nir_intrinsic_store_output_vec1_indirect:
   case nir_intrinsic_store_output_vec2_indirect:
   case nir_intrinsic_store_output_vec3_indirect:
   case nir_intrinsic_store_output_vec4_indirect: {
      fs_reg src = get_nir_src(instr->src[0]);
      fs_reg indirect = get_nir_src(instr->src[1]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0;
              j < nir_intrinsic_infos[instr->intrinsic].src_components[0]; j++) {
            fs_reg new_dest = nir_outputs;
            new_dest.reg_offset = instr->const_index[0] + index;
            new_dest.reladdr = new(mem_ctx) fs_reg(indirect);
            new_dest.type = src.type;

            fs_inst *inst = MOV(new_dest, src);
            if (instr->has_predicate)
               inst->predicate = BRW_PREDICATE_NORMAL;
            emit(inst);

            src.reg_offset++;
            index++;
         }
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}
void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   unsigned sampler = instr->sampler_index;

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated. This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components, offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i]);
      switch (instr->src_type[i]) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");
      case nir_tex_src_sampler_index:
         unreachable("not yet supported");
      default:
         unreachable("unknown texture source");
      }
   }
   if (instr->op == nir_texop_txf_ms) {
      if (brw->gen >= 7 &&
          key->tex.compressed_multisample_layout_mask & (1 << sampler))
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, fs_reg(sampler));
      else
         mcs = fs_reg(0u);
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
         break;
      }
   }
   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                offset, offset_components, mcs, gather_component,
                is_cube_array, is_rect, sampler, fs_reg(sampler), texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}
void
fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   /* Bail on SSA constant loads. These are used for immediates. */
   if (instr->dest.is_ssa)
      return;

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = BRW_REGISTER_TYPE_UD;
   if (instr->array_elems == 0) {
      for (unsigned i = 0; i < instr->num_components; i++) {
         emit(MOV(dest, fs_reg(instr->value.u[i])));
         dest.reg_offset++;
      }
   } else {
      for (unsigned i = 0; i < instr->array_elems; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            emit(MOV(dest, fs_reg(instr->array[i].u[j])));
            dest.reg_offset++;
         }
      }
   }
}
void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}