/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
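
/* This file implements the NIR front-end of the vec4 backend: it walks a
 * nir_shader and emits the equivalent SIMD4x2 vec4 IR through the
 * vec4_visitor emit helpers.
 */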

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "glsl/ir_uniform.h"

using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   nir_shader *nir = prog->nir;

   if (nir->num_inputs > 0)
      nir_setup_inputs(nir);

   if (nir->num_uniforms > 0)
      nir_setup_uniforms(nir);

   nir_setup_system_values(nir);

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}
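
/* System values such as gl_VertexID are handled lazily below: the first
 * intrinsic that references one allocates its register through
 * make_reg_for_system_value() and caches it in nir_system_values[], so every
 * later load reuses the same register.
 */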

void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID,
                                           glsl_type::int_type);
      break;

   default:
      break;
   }
}

static bool
setup_system_values_block(nir_block *block, void *void_visitor)
{
   vec4_visitor *v = (vec4_visitor *)void_visitor;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}

void
vec4_visitor::nir_setup_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);

   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, setup_system_values_block, this);
   }
}

void
vec4_visitor::nir_setup_inputs(nir_shader *shader)
{
   nir_inputs = ralloc_array(mem_ctx, src_reg, shader->num_inputs);

   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      int offset = var->data.driver_location;
      unsigned size = type_size_vec4(var->type);
      for (unsigned i = 0; i < size; i++) {
         src_reg src = src_reg(ATTR, var->data.location + i, var->type);
         nir_inputs[offset + i] = src;
      }
   }
}
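
/* Uniform storage layout used below: uniform_size[] counts vec4 slots per
 * uniform, and stage_prog_data->param[] receives one gl_constant_value
 * pointer per scalar channel, padded with a constant zero up to four entries
 * per slot.
 */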

void
vec4_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = 0;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBO's, atomics and samplers don't take up space in the
          * uniform file.
          */
         if (var->interface_type != NULL || var->type->contains_atomic() ||
             type_size_vec4(var->type) == 0) {
            continue;
         }

         assert(uniforms < uniform_array_size);
         uniform_size[uniforms] = type_size_vec4(var->type);

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);
      }
   } else {
      /* For ARB_vertex_program, only a single "parameters" variable is
       * generated to support uniform data.
       */
      nir_variable *var = (nir_variable *) shader->uniforms.get_head();
      assert(shader->uniforms.length() == 1 &&
             strcmp(var->name, "parameters") == 0);

      assert(uniforms < uniform_array_size);
      uniform_size[uniforms] = type_size_vec4(var->type);

      struct gl_program_parameter_list *plist = prog->Parameters;
      for (unsigned p = 0; p < plist->NumParameters; p++) {
         uniform_vector_size[uniforms] = plist->Parameters[p].Size;

         /* Parameters should be either vec4 uniforms or single component
          * constants; matrices and other larger types should have been broken
          * down earlier.
          */
         assert(uniform_vector_size[uniforms] <= 4);

         int i;
         for (i = 0; i < uniform_vector_size[uniforms]; i++) {
            stage_prog_data->param[uniforms * 4 + i] = &plist->ParameterValues[p][i];
         }
         for (; i < 4; i++) {
            static const gl_constant_value zero = { 0.0 };
            stage_prog_data->param[uniforms * 4 + i] = &zero;
         }

         uniforms++;
      }
   }
}

void
vec4_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the same
    * order we'd walk the type, so walk the list of storage and find anything
    * with our name, or the prefix of a component that starts with our name.
    */
   for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (storage->builtin)
         continue;

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      gl_constant_value *components = storage->storage;
      unsigned vector_count = (MAX2(storage->array_elements, 1) *
                               storage->type->matrix_columns);

      for (unsigned s = 0; s < vector_count; s++) {
         assert(uniforms < uniform_array_size);
         uniform_vector_size[uniforms] = storage->type->vector_elements;

         int i;
         for (i = 0; i < uniform_vector_size[uniforms]; i++) {
            stage_prog_data->param[uniforms * 4 + i] = components;
            components++;
         }
         for (; i < 4; i++) {
            static const gl_constant_value zero = { 0.0 };
            stage_prog_data->param[uniforms * 4 + i] = &zero;
         }

         uniforms++;
      }
   }
}

void
vec4_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been set up by ir_to_mesa,
       * but we'll get the same index back here.  We can reference
       * ParameterValues directly, since unlike brw_fs.cpp, we never
       * add new state references during compile.
       */
      int index = _mesa_add_state_reference(prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);
      gl_constant_value *values =
         &prog->Parameters->ParameterValues[index][0];

      assert(uniforms < uniform_array_size);

      for (unsigned j = 0; j < 4; j++)
         stage_prog_data->param[uniforms * 4 + j] =
            &values[GET_SWZ(slots[i].swizzle, j)];

      uniform_vector_size[uniforms] =
         (var->type->is_scalar() || var->type->is_vector() ||
          var->type->is_matrix() ? var->type->vector_elements : 4);

      uniforms++;
   }
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;

      nir_locals[reg->index] = dst_reg(GRF, alloc.allocate(array_elems));
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   emit(IF(BRW_PREDICATE_NORMAL));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}
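
/* NIR loops carry no loop condition: they are emitted as an unconditional
 * DO ... WHILE region and are exited through the BREAK instructions emitted
 * by nir_emit_jump().
 */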

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      dst_reg dst = dst_reg(GRF, alloc.allocate(1));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}

src_reg
vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(nir_src src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(nir_src src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int, num_components);
}
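
/* Constant loads scan the vector for components that share the same bit
 * pattern and pack them into a single writemasked MOV. For example, loading
 * vec4(0.0, 0.0, 1.0, 1.0) emits one MOV with writemask .xy for the repeated
 * zeros and a second MOV with writemask .zw for the repeated ones.
 */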

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg = dst_reg(GRF, alloc.allocate(1));
   reg.type = BRW_REGISTER_TYPE_D;

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if (instr->value.u[i] == instr->value.u[j]) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      emit(MOV(reg, src_reg(instr->value.i[i])));

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}
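
/* Most load/store intrinsics below come in pairs: a direct form whose offset
 * lives in const_index[], and an *_indirect form with an extra source holding
 * the dynamically computed offset. The has_indirect flag lets one case body
 * serve both forms.
 */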

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      int offset = instr->const_index[0];
      src = nir_inputs[offset];

      if (has_indirect) {
         /* The indirect offset applies to the attribute source being read. */
         src.reladdr = new(mem_ctx) src_reg(get_nir_src(instr->src[0],
                                                        BRW_REGISTER_TYPE_D,
                                                        1));
      }
      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      int varying = instr->const_index[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);
      dest = dst_reg(src);

      if (has_indirect) {
         dest.reladdr = new(mem_ctx) src_reg(get_nir_src(instr->src[1],
                                                         BRW_REGISTER_TYPE_D,
                                                         1));
      }
      output_reg[varying] = dest;
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ubo_index = const_uniform_block ? const_uniform_block->u[0] : 0;

      assert(shader->base.UniformBlocks[ubo_index].IsShaderStorage);

      src_reg surf_index = src_reg(prog_data->base.binding_table.ubo_start +
                                   ubo_index);
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = src_reg(surf_index);

      /* MRF for the first parameter */
      src_reg lod = src_reg(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);
      break;
   }

   case nir_intrinsic_store_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ubo_start +
                          const_uniform_block->u[0];
         surf_index = src_reg(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  src_reg(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      /* Offset */
      src_reg offset_reg = src_reg(this, glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[2], 1)));
      } else {
         const_offset_bytes = instr->const_index[0];
         emit(MOV(dst_reg(offset_reg), src_reg(const_offset_bytes)));
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[1];

      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require to write a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require to modify the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      int swizzle[4] = { 0, 0, 0, 0};
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array.
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in the
          * vector, so write the channels we accumulated until now, if any. Do
          * the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (!has_indirect) {
                     const_offset_bytes += 4 * skipped_channels;
                     offset_reg = src_reg(const_offset_bytes);
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the channels
                * we need to write and send the write message. This will write
                * num_channels consecutive dwords starting at offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just written
                * now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase skipped count */
            skipped_channels++;
         }
      }

      break;
   }

   case nir_intrinsic_load_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      src_reg surf_index;
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ubo_start +
                          const_uniform_block->u[0];
         surf_index = src_reg(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  src_reg(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      src_reg offset_reg = src_reg(this, glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[1], 1)));
      } else {
         const_offset_bytes = instr->const_index[0];
         emit(MOV(dst_reg(offset_reg), src_reg(const_offset_bytes)));
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
         .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size*/,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));

      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_min:
      if (dest.type == BRW_REGISTER_TYPE_D)
         nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      else
         nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_max:
      if (dest.type == BRW_REGISTER_TYPE_D)
         nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      else
         nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }

   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, instr->const_index[0]));
      src.reg_offset = instr->const_index[1];

      if (has_indirect) {
         src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1);
         src.reladdr = new(mem_ctx) src_reg(tmp);
      }

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      src_reg offset = get_nir_src(instr->src[0], nir_type_int,
                                   instr->num_components);
      dest = get_nir_dest(instr->dest);

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }

      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }

   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         surf_index = src_reg(prog_data->base.binding_table.ubo_start +
                              const_block_index->u[0]);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int,
                                                   instr->num_components),
                  src_reg(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      unsigned const_offset = instr->const_index[0];
      src_reg offset;

      if (!has_indirect) {
         offset = src_reg(const_offset / 16);
      } else {
         offset = src_reg(this, glsl_type::uint_type);
         emit(SHR(dst_reg(offset), get_nir_src(instr->src[1], nir_type_int, 1),
                  src_reg(4u)));
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4,
                                            const_offset % 16 / 4,
                                            const_offset % 16 / 4,
                                            const_offset % 16 / 4);

      emit(MOV(dest, packed_consts));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ubo_start +
                            const_surface->u[0];
      surface = src_reg(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               src_reg(prog_data->base.binding_table.ubo_start)));

      /* Assume this may touch any UBO. This is the same we do for other
       * UBO/SSBO accesses with non-constant surface.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ubo_start +
                            shader_prog->NumBufferInterfaceBlocks - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result =
      surface_access::emit_untyped_atomic(bld, surface, offset,
                                          data1, data2,
                                          1 /* dims */, 1 /* rsize */,
                                          op,
                                          BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}
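
/* ALU handling folds NIR's per-source swizzles and abs/negate modifiers
 * straight into the vec4 operands, so something like fadd(abs(a.yyyy), -b)
 * becomes a single ADD whose sources carry the swizzle and modifier bits.
 */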

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   dst_reg dst = get_nir_dest(instr->dest.dest,
                              nir_op_infos[instr->op].output_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src,
                          nir_op_infos[instr->op].input_types[i], 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fadd:
      /* fall through */
   case nir_op_iadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (value0 && value0->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }
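
   /* Example: with one operand known to fit in 16 bits, imul(x, 3) on Gen7
    * becomes a single MUL with the constant in src1; with two unknown
    * operands we need the full MUL + MACH + MOV sequence through the
    * accumulator.
    */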

   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      emit(MUL(acc, op[0], op[1]));
      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
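
   /* ceil(x) is implemented as -floor(-x): negate the source, round down
    * with RNDD into a temporary, and negate the temporary on the way out.
    */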

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      dst_reg tmp = dst_reg(this, glsl_type::bool_type);

      switch (instr->op) {
      case nir_op_ball_fequal2:
      case nir_op_ball_iequal2:
         tmp.writemask = WRITEMASK_XY;
         break;
      case nir_op_ball_fequal3:
      case nir_op_ball_iequal3:
         tmp.writemask = WRITEMASK_XYZ;
         break;
      case nir_op_ball_fequal4:
      case nir_op_ball_iequal4:
         tmp.writemask = WRITEMASK_XYZW;
         break;
      default:
         unreachable("not reached");
      }

      emit(CMP(tmp, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, src_reg(0)));
      inst = emit(MOV(dst, src_reg(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      dst_reg tmp = dst_reg(this, glsl_type::bool_type);

      switch (instr->op) {
      case nir_op_bany_fnequal2:
      case nir_op_bany_inequal2:
         tmp.writemask = WRITEMASK_XY;
         break;
      case nir_op_bany_fnequal3:
      case nir_op_bany_inequal3:
         tmp.writemask = WRITEMASK_XYZ;
         break;
      case nir_op_bany_fnequal4:
      case nir_op_bany_inequal4:
         tmp.writemask = WRITEMASK_XYZW;
         break;
      default:
         unreachable("not reached");
      }

      emit(CMP(tmp, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, src_reg(0)));
      inst = emit(MOV(dst, src_reg(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i:
      emit(AND(dst, op[0], src_reg(1)));
      break;

   case nir_op_b2f:
      op[0].type = BRW_REGISTER_TYPE_D;
      dst.type = BRW_REGISTER_TYPE_D;
      emit(AND(dst, op[0], src_reg(0x3f800000u)));
      dst.type = BRW_REGISTER_TYPE_F;
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it accessed incorrect data, producing
       * regressions in Piglit. We repeat the swizzle of the first component on
       * the rest of the components to avoid regressions. In the vec4_visitor
       * IR code path this is not needed because the operand already has the
       * correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      src_reg temp = src_reg(this, glsl_type::uint_type);

      inst = emit(FBH(dst_reg(temp), op[0]));
      inst->dst.writemask = WRITEMASK_XYZW;

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
      temp.swizzle = BRW_SWIZZLE_NOOP;
      emit(MOV(dst, temp));

      src_reg src_tmp = src_reg(dst);
      emit(CMP(dst_null_d(), src_tmp, src_reg(-1), BRW_CONDITIONAL_NZ));

      src_tmp.negate = true;
      inst = emit(ADD(dst, src_tmp, src_reg(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_find_lsb:
      emit(FBL(dst, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(dst_null_f(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], src_reg(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), src_reg(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], src_reg(31)));
      inst = emit(OR(dst, src_reg(dst), src_reg(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4: {
      dst_reg tmp = dst_reg(this, glsl_type::bool_type);
      tmp.writemask = brw_writemask_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(tmp, op[0], src_reg(0), BRW_CONDITIONAL_NZ));

      emit(MOV(dst, src_reg(0)));
      inst = emit(MOV(dst, src_reg(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), src_reg(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}

enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   switch (alu_type) {
   case nir_type_float:
      return glsl_type::vec(components);
   case nir_type_int:
      return glsl_type::ivec(components);
   case nir_type_unsigned:
      return glsl_type::uvec(components);
   case nir_type_bool:
      return glsl_type::bvec(components);
   default:
      return glsl_type::error_type;
   }

   return glsl_type::error_type;
}
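
/* Texture handling gathers every NIR texture source into vec4 registers,
 * translates the NIR opcode into the IR-level ir_texture_opcode, and then
 * defers to the emit_texture() helper shared with the GLSL IR path.
 */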

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   src_reg sampler_reg = src_reg(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparitor;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
    * emitting anything other than setting up the constant result.
    */
   if (instr->op == nir_texop_tg4) {
      int swiz = GET_SWZ(key_tex->swizzles[sampler], instr->component);
      if (swiz == SWIZZLE_ZERO || swiz == SWIZZLE_ONE) {
         emit(MOV(dest, src_reg(swiz == SWIZZLE_ONE ? 1.0f : 0.0f)));
         return;
      }
   }

   /* Load the texture operation sources */
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparitor:
         shadow_comparitor = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         assert(coord_type != NULL);
         if (devinfo->gen >= 7 &&
             key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
            mcs = emit_mcs_fetch(coord_type, coordinate, sampler_reg);
         } else {
            mcs = src_reg(0u);
         }
         mcs = retype(mcs, BRW_REGISTER_TYPE_UD);
         break;
      }

      case nir_tex_src_offset:
         offset_value = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         break;

      case nir_tex_src_sampler_offset: {
         /* The highest sampler which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->sampler_array_size;
         uint32_t max_used = sampler + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, src_reg(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         constant_offset = brw_texture_offset(instr->const_offset, 3);
         break;
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4)
      constant_offset |= gather_channel(instr->component, sampler) << 16;

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   bool is_cube_array =
      instr->op == nir_texop_txs &&
      instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
      instr->is_array;

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparitor,
                lod, lod2, sample_index,
                constant_offset, offset_value,
                mcs, is_cube_array, sampler, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(GRF, alloc.allocate(1));
}

} /* namespace brw */