/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "brw_fs.h"
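
/* Run the NIR optimization loop to a fixed point: the passes that report
 * progress OR their results together and the loop repeats until none of
 * them changes anything.  nir_validate_shader() runs after every pass so
 * that malformed IR is caught as soon as it is produced (in non-debug
 * builds validation is effectively a no-op).
 */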
static void
nir_optimize(nir_shader *nir)
{
   bool progress;
   do {
      progress = false;

      nir_lower_vars_to_ssa(nir);
      nir_validate_shader(nir);
      nir_lower_alu_to_scalar(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      nir_lower_phis_to_scalar(nir);
      nir_validate_shader(nir);
      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_algebraic(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_constant_folding(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_remove_phis(nir);
      nir_validate_shader(nir);
   } while (progress);
}
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
   int *count = (int *) state;
   nir_foreach_instr(block, instr) {
      (*count)++;
   }
   return true;
}
static int
count_nir_instrs(nir_shader *nir)
{
   int count = 0;
   nir_foreach_overload(nir, overload) {
      if (!overload->impl)
         continue;
      nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
   }
   return count;
}
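
/* Main entry point: lower the shader's GLSL IR to NIR, run the lowering
 * and optimization passes, take the shader out of SSA form, and then walk
 * the NIR shader emitting fs_visitor IR for it.
 */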
void
fs_visitor::emit_nir_code()
{
   /* first, lower the GLSL IR shader to NIR */
   lower_output_reads(shader->base.ir);
   nir_shader *nir = glsl_to_nir(shader->base.ir, NULL, true);
   nir_validate_shader(nir);

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

   nir_optimize(nir);

   /* Lower a bunch of stuff */
   nir_lower_var_copies(nir);
   nir_validate_shader(nir);

   nir_lower_io(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   nir_lower_samplers(nir, shader_prog, shader->base.Program);
   nir_validate_shader(nir);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   nir_lower_atomics(nir);
   nir_validate_shader(nir);

   nir_optimize(nir);

   nir_lower_to_source_mods(nir);
   nir_validate_shader(nir);
   nir_copy_prop(nir);
   nir_validate_shader(nir);

   if (INTEL_DEBUG & DEBUG_WM) {
      fprintf(stderr, "NIR (SSA form) for fragment shader:\n");
      nir_print_shader(nir, stderr);
   }
   if (dispatch_width == 8) {
      static GLuint msg_id = 0;
      _mesa_gl_debug(&brw->ctx, &msg_id,
                     MESA_DEBUG_SOURCE_SHADER_COMPILER,
                     MESA_DEBUG_TYPE_OTHER,
                     MESA_DEBUG_SEVERITY_NOTIFICATION,
                     "FS NIR shader: %d inst\n",
                     count_nir_instrs(nir));
   }

   nir_convert_from_ssa(nir);
   nir_validate_shader(nir);
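
   /* From this point on the shader is out of SSA form: the scalar backend
    * works on virtual GRFs, so the SSA values have been turned back into
    * NIR registers, which get mapped onto VGRFs below and in
    * nir_emit_impl().
    */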
   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */

   if (nir->num_inputs > 0) {
      nir_inputs = vgrf(nir->num_inputs);
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = vgrf(nir->num_outputs);
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_uniforms = fs_reg(UNIFORM, 0);
      nir_setup_uniforms(nir);
   }

   nir_emit_system_values(nir);

   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = vgrf(size);
   }

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }

   if (INTEL_DEBUG & DEBUG_WM) {
      fprintf(stderr, "NIR (final form) for fragment shader:\n");
      nir_print_shader(nir, stderr);
   }
}
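
/* All FS inputs are interpolated once, up front, into the nir_inputs
 * array; the load_input intrinsics emitted later simply read from that
 * array.  (The long comment above the interp_var_at_* intrinsics below
 * explains why this layout matters.)
 */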
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   struct hash_entry *entry;
   hash_table_foreach(shader->inputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      fs_reg varying = offset(nir_inputs, var->data.driver_location);

      fs_reg reg;
      if (!strcmp(var->name, "gl_FragCoord")) {
         reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                             var->data.origin_upper_left);
         emit_percomp(MOV(varying, reg), 0xF);
      } else {
         emit_general_interpolation(varying, var->name, var->type,
                                    (glsl_interp_qualifier) var->data.interpolation,
                                    var->data.location, var->data.centroid,
                                    var->data.sample);
      }
   }
}
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   struct hash_entry *entry;
   hash_table_foreach(shader->outputs, entry) {
      nir_variable *var = (nir_variable *) entry->data;
      fs_reg reg = offset(nir_outputs, var->data.driver_location);

      if (var->data.index > 0) {
         assert(var->data.location == FRAG_RESULT_DATA0);
         assert(var->data.index == 1);
         this->dual_src_output = reg;
         this->do_dual_src = true;
      } else if (var->data.location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
            this->outputs[i] = reg;
            this->output_components[i] = 4;
         }
      } else if (var->data.location == FRAG_RESULT_DEPTH) {
         this->frag_depth = reg;
      } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
         this->sample_mask = reg;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(var->data.location >= FRAG_RESULT_DATA0 &&
                var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         int vector_elements =
            var->type->is_array() ? var->type->fields.array->vector_elements
                                  : var->type->vector_elements;

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
            int output = var->data.location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = offset(reg, vector_elements * i);
            this->output_components[output] = vector_elements;
         }
      }
   }
}
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;
   param_size[0] = shader->num_uniforms;

   if (dispatch_width != 8)
      return;

   struct hash_entry *entry;
   hash_table_foreach(shader->uniforms, entry) {
      nir_variable *var = (nir_variable *) entry->data;

      /* UBO's and atomics don't take up space in the uniform file */
      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (strncmp(var->name, "gl_", 3) == 0)
         nir_setup_builtin_uniform(var);
      else
         nir_setup_uniform(var);
   }
}
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}
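
/* Built-in uniforms (those with a "gl_" prefix) are backed by Mesa state
 * parameters rather than user uniform storage, so they are looked up by
 * state token instead; each state slot contributes up to four swizzled
 * components to the param array.
 */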
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->brw->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      default:
         break;
      }
   }

   return true;
}
void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = vgrf(size);
   }

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
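
/* Structured control flow maps directly onto the hardware's IF/ELSE/ENDIF
 * and DO/WHILE instructions.  Note that pre-gen6 hardware can't run
 * non-uniform control flow in SIMD16 mode, so if an if/else can't be
 * flattened into a SEL we have to give up on the SIMD16 program.
 */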
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = emit(MOV(reg_null_d,
                            retype(get_nir_src(if_stmt->condition),
                                   BRW_REGISTER_TYPE_UD)));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);

   if (!try_replace_with_sel() && brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   if (brw->gen < 6) {
      no16("Can't support (non-uniform) control flow on SIMD16\n");
   }

   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      /* We can hit these, but we do nothing now and use them as
       * immediates later.
       */
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_bool:
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
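
/* nir_emit_alu() relies on the scalarizing passes run from nir_optimize():
 * other than the mov/vecN copies handled as a special case below, every
 * ALU instruction arriving here writes exactly one channel (see the
 * write_mask assert below).
 */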
void
fs_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;
   fs_reg op[4];

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }
   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = retype(vgrf(4), result.type);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = emit(MOV(offset(temp, i),
                            offset(op[0], instr->src[0].swizzle[i])));
         } else {
            inst = emit(MOV(offset(temp, i),
                            offset(op[i], instr->src[i].swizzle[0])));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            emit(MOV(offset(result, i), offset(temp, i)));
         }
      }
      return;
   }
   default:
      break;
   }
   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], instr->src[i].swizzle[channel]);
   }
   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      emit(MOV(result, op[0]));
      break;
   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      emit(AND(result_int, op[0], fs_reg(0x80000000u)));

      inst = emit(OR(result_int, result_int, fs_reg(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      if (instr->dest.saturate) {
         inst = emit(MOV(result, result));
         inst->saturate = true;
      }
      break;
   }
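
   /* Worked example for fsign: op[0] = -2.5f = 0xc0200000.  AND with
    * 0x80000000 leaves 0x80000000; since -2.5f != 0.0f the predicated OR
    * fires and yields 0x80000000 | 0x3f800000 = 0xbf800000 = -1.0f.
    */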
   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G));
      emit(ASR(result, op[0], fs_reg(31)));
      inst = emit(OR(result, result, fs_reg(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp:
   case nir_op_flog:
      unreachable("not reached: should be handled by ir_explog_to_explog2");
   case nir_op_fsin:
   case nir_op_fsin_reduced:
      inst = emit_math(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
   case nir_op_fcos_reduced:
      inst = emit_math(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
                     fs_reg(fs_key->render_to_fbo));
      } else {
         inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
                     fs_reg(fs_key->render_to_fbo));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
                  fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
                  fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fadd:
   case nir_op_iadd:
      inst = emit(ADD(result, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(result, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      /* TODO put in the 16-bit constant optimization once we have SSA */

      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit(MUL(acc, op[0], op[1]));
      emit(MACH(reg_null_d, op[0], op[1]));
      emit(MOV(result, fs_reg(acc)));
      break;
   }
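
   /* Note on the sequence above: 32-bit integer multiplies go through the
    * accumulator as a MUL/MACH pair.  MACH completes the multiply, leaving
    * the low 32 bits in the accumulator (read back by the MOV) while its
    * own destination receives the high 32 bits -- discarded here, but used
    * directly by the *mul_high cases below.
    */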
   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      emit(MUL(acc, op[0], op[1]));
      emit(MACH(result, op[0], op[1]));
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(ADDC(reg_null_ud, op[0], op[1]));
      emit(MOV(result, fs_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      if (brw->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
                                  BRW_REGISTER_TYPE_UD);

      emit(SUBB(reg_null_ud, op[0], op[1]));
      emit(MOV(result, fs_reg(acc)));
      break;
   }

   case nir_op_umod:
      emit_math(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_L));
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE));
      break;

   case nir_op_feq:
   case nir_op_ieq:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z));
      break;

   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ));
      break;

   case nir_op_inot:
      emit(NOT(result, op[0]));
      break;
   case nir_op_ixor:
      emit(XOR(result, op[0], op[1]));
      break;
   case nir_op_ior:
      emit(OR(result, op[0], op[1]));
      break;
   case nir_op_iand:
      emit(AND(result, op[0], op[1]));
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");
   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
      emit(AND(result, op[0], fs_reg(1)));
      break;
   case nir_op_b2f:
      emit(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u)));
      break;

   case nir_op_f2b:
      emit(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
      break;
   case nir_op_i2b:
      emit(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_ftrunc:
      inst = emit(RNDZ(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      /* ceil(x) = -floor(-x) */
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      emit(RNDD(temp, op[0]));
      temp.negate = true;
      inst = emit(MOV(result, temp));
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = emit(RNDD(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = emit(FRC(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = emit(RNDE(result, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (brw->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L));
         inst = emit(SEL(result, op[0], op[1]));
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (brw->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE));
         inst = emit(SEL(result, op[0], op[1]));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_bitfield_reverse:
      emit(BFREV(result, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(result, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      emit(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ));
      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = emit(ADD(result, neg_result, fs_reg(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }
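
   /* Worked example: findMSB(0x00800000).  Bit 23 is the highest bit set,
    * so FBH (which counts down from bit 31) returns 8, and the predicated
    * 31 - 8 = 23 is exactly what GLSL's findMSB() requires.
    */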
   case nir_op_find_lsb:
      emit(FBL(result, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      emit(BFE(result, op[2], op[1], op[0]));
      break;
   case nir_op_bfm:
      emit(BFI1(result, op[0], op[1]));
      break;
   case nir_op_bfi:
      emit(BFI2(result, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      emit(SHL(result, op[0], op[1]));
      break;
   case nir_op_ishr:
      emit(ASR(result, op[0], op[1]));
      break;
   case nir_op_ushr:
      emit(SHR(result, op[0], op[1]));
      break;

   case nir_op_pack_half_2x16_split:
      emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = emit(MAD(result, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      /* TODO emulate for gen < 6 */
      inst = emit(LRP(result, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
      inst = emit(SEL(result, op[1], op[2]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }
}
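
/* By this point in the pass ordering the only SSA values left are the
 * destinations of load_const instructions -- everything else was taken out
 * of SSA by nir_convert_from_ssa() -- so an SSA source here can only come
 * from a constant, which we materialize as immediate MOVs into a VGRF.
 */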
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   if (src.is_ssa) {
      assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
      nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
      fs_reg reg = vgrf(src.ssa->num_components);
      reg.type = BRW_REGISTER_TYPE_D;

      for (unsigned i = 0; i < src.ssa->num_components; ++i)
         emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));

      return reg;
   } else {
      fs_reg reg;
      if (src.reg.reg->is_global)
         reg = nir_globals[src.reg.reg->index];
      else
         reg = nir_locals[src.reg.reg->index];

      /* to avoid floating-point denorm flushing problems, set the type by
       * default to D - instructions that need floating point semantics will set
       * this to F if they need to
       */
      reg = retype(offset(reg, src.reg.base_offset), BRW_REGISTER_TYPE_D);
      if (src.reg.indirect) {
         reg.reladdr = new(mem_ctx) fs_reg();
         *reg.reladdr = retype(get_nir_src(*src.reg.indirect),
                               BRW_REGISTER_TYPE_D);
      }

      return reg;
   }
}
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   fs_reg reg;
   if (dest.reg.reg->is_global)
      reg = nir_globals[dest.reg.reg->index];
   else
      reg = nir_locals[dest.reg.reg->index];

   reg = offset(reg, dest.reg.base_offset);
   if (dest.reg.indirect) {
      reg.reladdr = new(mem_ctx) fs_reg();
      *reg.reladdr = retype(get_nir_src(*dest.reg.indirect),
                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}
void
fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
      new_inst->dst = offset(new_inst->dst, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (inst->src[j].file == GRF)
            new_inst->src[j] = offset(new_inst->src[j], i);

      emit(new_inst);
   }
}
void
fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard: {
      /* We track our discarded pixels in f0.1.  By predicating on it, we can
       * update just the flag bits that aren't yet discarded.  By emitting a
       * CMP of g0 != g0, all our currently executing channels will get turned
       * off.
       */
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      fs_inst *cmp = emit(CMP(reg_null_f, some_reg, some_reg,
                              BRW_CONDITIONAL_NZ));
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (brw->gen >= 6) {
         /* For performance, after a discard, jump to the end of the shader.
          * Only jump if all relevant channels have been discarded.
          */
         fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
         discard_jump->flag_subreg = 1;

         discard_jump->predicate = (dispatch_width == 8)
                                   ? BRW_PREDICATE_ALIGN1_ANY8H
                                   : BRW_PREDICATE_ALIGN1_ANY16H;
         discard_jump->predicate_inverse = true;
      }
      break;
   }
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }
   case nir_intrinsic_load_front_face:
      emit(MOV(retype(dest, BRW_REGISTER_TYPE_D),
               *emit_frontfacing_interpolation()));
      break;

   case nir_intrinsic_load_sample_mask_in: {
      fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
      assert(sample_mask_in.file != BAD_FILE);
      dest.type = sample_mask_in.type;
      emit(MOV(dest, sample_mask_in));
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      emit(MOV(dest, sample_pos));
      emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
      assert(sample_id.file != BAD_FILE);
      dest.type = sample_id.type;
      emit(MOV(dest, sample_id));
      break;
   }
   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = offset(retype(nir_uniforms, dest.type),
                                instr->const_index[0] + index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            index++;

            emit(MOV(dest, src));
            dest = offset(dest, 1);
         }
      }
      break;
   }
   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; the generator will select
          * a value from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         emit(ADD(surf_index, get_nir_src(instr->src[0]),
                  fs_reg(stage_prog_data->binding_table.ubo_start)))
            ->force_writemask_all = true;

         /* Assume this may touch any UBO.  It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                      BRW_REGISTER_TYPE_D),
                  fs_reg(2)));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++)
            emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
                                            base_offset, vec4_offset + i));
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
              surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            emit(MOV(dest, packed_consts));
            dest = offset(dest, 1);
         }
      }
      break;
   }
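
   /* Worked example for the constant-offset UBO path above: a byte offset
    * of 20 loads the whole 16-byte-aligned chunk at byte 16 (20 & ~15),
    * and set_smear(20 % 16 / 4 + i) = set_smear(1 + i) then picks out
    * consecutive dwords starting at the second one in that chunk.
    */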
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg src = offset(retype(nir_inputs, dest.type),
                                instr->const_index[0] + index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
            index++;

            emit(MOV(dest, src));
            dest = offset(dest, 1);
         }
      }
      break;
   }
   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * like we do for other inputs.  We have to do that because the way we
    * set up inputs doesn't allow us to use the already setup inputs for
    * interpolation.  At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io.  If the input isn't used, dead code cleans up and
    * everything works fine.  However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently.  If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array.  However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation.  In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      /* in SIMD16 mode, the pixel interpolator returns coords interleaved
       * 8 channels at a time, same as the barycentric coords presented in
       * the FS payload.  This requires a bit of extra work to support.
       */
      no16("interpolate_at_* not yet supported in SIMD16 mode.");

      fs_reg dst_x = vgrf(2);
      fs_reg dst_y = offset(dst_x, 1);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload.  In the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1;     /* one reg unless overridden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_x, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample id's */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_x, src,
                     fs_reg(msg_data));
         break;
      }
      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_x, src,
                        fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
               fs_reg itemp = vgrf(glsl_type::int_type);
               emit(MOV(itemp, temp));  /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */
               emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
                  ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
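
               /* e.g. an offset of exactly +0.5 becomes (int)(0.5 * 16) = 8,
                * which the SEL clamps to 7, i.e. +7/16 of a pixel.
                */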
            }

            mlen = 2;
            inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_x, src,
                        fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      inst->regs_written = 2; /* 2 floats per slot returned */
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         emit(FS_OPCODE_LINTERP, dest, dst_x, dst_y, src);
         dest = offset(dest, 1);
      }
      break;
   }
   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (int i = 0; i < instr->const_index[1]; i++) {
         for (unsigned j = 0; j < instr->num_components; j++) {
            fs_reg new_dest = offset(retype(nir_outputs, src.type),
                                     instr->const_index[0] + index);
            if (has_indirect)
               src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
            index++;

            emit(MOV(new_dest, src));
            src = offset(src, 1);
         }
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}
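
/* Translate a NIR texture instruction by gathering its sources into the
 * registers expected by the existing emit_texture() helper and mapping the
 * NIR texture opcode back onto the corresponding ir_texture_opcode.
 */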
void
fs_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0, offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, offset;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && brw->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         emit(ADD(sampler_reg, src, fs_reg(sampler)))
            ->force_writemask_all = true;
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }
   if (instr->op == nir_texop_txf_ms) {
      if (brw->gen >= 7 &&
          key->tex.compressed_multisample_layout_mask & (1 << sampler))
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      else
         mcs = fs_reg(0u);
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         offset = fs_reg(brw_texture_offset(ctx, instr->const_offset, 3));
         break;
      }
   }
   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);
   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                offset, offset_components, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}
void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}