/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_program.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {
void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_setup_system_values();

   /* get the main function and emit it */
   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}
void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID);
      break;

   case nir_intrinsic_load_base_instance:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_INSTANCE);
      break;

   case nir_intrinsic_load_draw_id:
      reg = &nir_system_values[SYSTEM_VALUE_DRAW_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_DRAW_ID);
      break;

   default:
      break;
   }
}
static bool
setup_system_values_block(nir_block *block, vec4_visitor *v)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}
void
vec4_visitor::nir_setup_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = dst_reg();
   }

   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(block, function->impl) {
         setup_system_values_block(block, this);
      }
   }
}
void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;
}
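/* Note: nir->num_uniforms above is counted in bytes, while the vec4 backend
 * counts uniforms in 16-byte (vec4) slots, hence the division by 16.
 */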
void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));

      if (reg->bit_size == 64)
         nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}
void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}
static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, 8, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      dst_reg dst =
         dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}
src_reg
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}
src_reg
vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int32, num_components);
}
src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);
   nir_const_value *const_value = nir_src_as_const_value(*offset_src);

   if (const_value) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(const_value->u32[0] == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}
void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg;

   if (instr->def.bit_size == 64) {
      reg = dst_reg(VGRF, alloc.allocate(2));
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      reg = dst_reg(VGRF, alloc.allocate(1));
      reg.type = BRW_REGISTER_TYPE_D;
   }

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if ((instr->def.bit_size == 32 &&
              instr->value.u32[i] == instr->value.u32[j]) ||
             (instr->def.bit_size == 64 &&
              instr->value.f64[i] == instr->value.f64[j])) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      if (instr->def.bit_size == 64) {
         emit(MOV(reg, brw_imm_df(instr->value.f64[i])));
      } else {
         emit(MOV(reg, brw_imm_d(instr->value.i32[i])));
      }

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}
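/* A worked example of the constant-compaction loop above (illustration
 * only): for vec4(1.0, 1.0, 2.0, 2.0), the pass at i=0 finds the matching
 * component j=1 and emits MOV dst.xy, 1.0f; the pass at i=2 then emits
 * MOV dst.zw, 2.0f.  Only two MOVs are needed instead of four.
 */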
void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

      /* We set EmitNoIndirectInput for VS */
      assert(const_offset);

      src = src_reg(ATTR, instr->const_index[0] + const_offset->u32[0],
                    glsl_type::uvec4_type);
      /* Swizzle source based on component layout qualifier */
      src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));

      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }
   case nir_intrinsic_store_output: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      assert(const_offset);

      int varying = instr->const_index[0] + const_offset->u32[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      unsigned c = nir_intrinsic_component(instr);
      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = instr->num_components;
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info->num_ssbos - 1);
      }

      /* Offset */
      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[2], 1);
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[0];
      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we have decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require to write a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require to modify the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);
      int swizzle[4] = { 0, 0, 0, 0};
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in the
          * vector, so write the channels we accumulated until now, if any. Do
          * the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (offset_reg.file == IMM) {
                     offset_reg.ud += 4 * skipped_channels;
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the channels
                * we need to write and send the write message. This will write
                * num_channels consecutive dwords starting at offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just written
                * now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase skipped count */
            skipped_channels++;
         }
      }

      break;
   }
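   /* Worked example of the packing loop above (illustration only): for a
    * store with writemask .xyw, channels x and y are accumulated and flushed
    * as a single 2-dword untyped write when the gap at z is found;
    * skipped_channels then becomes 3 (2 written + 1 gap), so the second
    * message writes the w channel as 1 dword at offset + 12 bytes.
    */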
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      src_reg surf_index;
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info->num_ssbos - 1);
      }

      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[1], 1);
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
         .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size*/,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));
      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;
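   /* Note: atomic exchange maps to BRW_AOP_MOV (the new value simply
    * replaces the old one) and compare-and-swap maps to BRW_AOP_CMPWR,
    * which is why nir_emit_ssbo_atomic() below fetches a second data
    * operand only for BRW_AOP_CMPWR.
    */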
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }
   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 4 */
      assert(nir_intrinsic_base(instr) % 4 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
      src.type = dest.type;

      /* Uniforms don't actually have to be vec4 aligned.  In the case that
       * it isn't, we have to use a swizzle to shift things around.  They
       * do still have the std140 alignment requirement that vec2's have to
       * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
       *
       * The swizzle also works in the indirect case as the generator adds
       * the swizzle to the offset for us.
       */
      unsigned shift = (nir_intrinsic_base(instr) % 16) / 4;
      assert(shift + instr->num_components <= 4);

      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
      if (const_offset) {
         /* Offsets are in bytes but they should always be multiples of 4 */
         assert(const_offset->u32[0] % 4 == 0);

         unsigned offset = const_offset->u32[0] + shift * 4;
         src.offset = ROUND_DOWN_TO(offset, 16);
         shift = (offset % 16) / 4;
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         emit(MOV(dest, src));
      } else {
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         /* MOV_INDIRECT is going to stomp the whole thing anyway */
         dest.writemask = WRITEMASK_XYZW;

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }
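   /* Worked example of the alignment math above (illustration only): a vec2
    * uniform at base byte offset 8 lands in UNIFORM slot 0 (8 / 16 == 0)
    * with shift == 2, so the constant-offset path reads its components
    * through a .zw swizzle of that slot instead of starting a new vec4 slot.
    */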
   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);

      /* Get some metadata from the image intrinsic. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

      /* Get the arguments of the atomic intrinsic. */
      src_reg offset = get_nir_src(instr->src[0], nir_type_int32,
                                   instr->num_components);
      const src_reg surface = brw_imm_ud(surf_index);
      const src_reg src0 = (info->num_srcs >= 2
                            ? get_nir_src(instr->src[1]) : src_reg());
      const src_reg src1 = (info->num_srcs >= 3
                            ? get_nir_src(instr->src[2]) : src_reg());

      src_reg tmp;

      dest = get_nir_dest(instr->dest);

      if (instr->intrinsic == nir_intrinsic_atomic_counter_read) {
         tmp = emit_untyped_read(bld, surface, offset, 1, 1);
      } else {
         tmp = emit_untyped_atomic(bld, surface, offset, src0, src1, 1, 1,
                                   get_atomic_counter_op(instr->intrinsic));
      }

      bld.MOV(retype(dest, tmp.type), tmp);
      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                const_block_index->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int32,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info->num_ubos - 1);
      }

      src_reg offset;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset = brw_imm_ud(const_offset->u32[0] & ~15);
      } else {
         offset = get_nir_src(instr->src[1], nir_type_uint32, 1);
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (const_offset) {
         packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4);
      }

      emit(MOV(dest, packed_consts));
      break;
   }
   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->size_written = 2 * REG_SIZE;
      break;
   }
   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}
void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ssbo_start +
                            const_surface->u32[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));

      /* Assume this may touch any UBO. This is the same we do for other
       * UBO/SSBO accesses with non-constant surface.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info->num_ssbos - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}
static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}
static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}
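/* Try to turn the condition of a bcsel that comes straight from a vector
 * comparison into a single predicated CMP using the Align16 ANY4H/ALL4H
 * predication modes, instead of materializing a boolean vector first.
 */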
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      op[i] = get_nir_src(cmp_instr->src[i].src,
                          nir_op_infos[cmp_instr->op].input_types[i], 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_conditional_for_nir_comparison(cmp_instr->op)));

   return true;
}
static void
emit_find_msb_using_lzd(const vec4_builder &bld,
                        const dst_reg &dst,
                        const src_reg &src,
                        bool is_signed)
{
   vec4_instruction *inst;
   src_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *      For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));

      bld.ASR(dst_reg(temp), src, brw_imm_d(31));
      bld.XOR(dst_reg(temp), temp, src);
   }

   bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB count
    * into an LSB count.  If no bits are set, LZD will return 32.  31-32 = -1,
    * which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
                  brw_imm_d(31));
   inst->src[0].negate = true;
}
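/* Quick sanity check of the math above (illustration only): for src ==
 * 0x10, LZD returns 27 leading zeros and 31 - 27 == 4 == findMSB(16).
 * For src == 0, LZD returns 32 and 31 - 32 == -1, the value GLSL requires.
 */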
void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
                                           nir_dest_bit_size(instr->dest.dest));
   dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = (nir_alu_type)
         (nir_op_infos[instr->op].input_types[i] |
          nir_src_bit_size(instr->src[i].src));
      op[i] = get_nir_src(instr->src[i].src, src_type, 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");
   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;
   case nir_op_d2f: {
      dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
      emit(MOV(temp, op[0]));

      dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
      temp2 = retype(temp2, BRW_REGISTER_TYPE_F);
      emit(VEC4_OPCODE_DOUBLE_TO_FLOAT, temp2, src_reg(temp))
         ->size_written = 2 * REG_SIZE;

      vec4_instruction *inst = emit(MOV(dst, src_reg(temp2)));
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_f2d: {
      dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
      src_reg tmp_src = src_reg(this, glsl_type::vec4_type);
      emit(MOV(dst_reg(tmp_src), retype(op[0], BRW_REGISTER_TYPE_F)));
      emit(VEC4_OPCODE_FLOAT_TO_DOUBLE, tmp_dst, tmp_src);
      vec4_instruction *inst = emit(MOV(dst, src_reg(tmp_dst)));
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_iadd:
   case nir_op_fadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates in the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (value0 && value0->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }
   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }
   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;
   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }
   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }
= emit(RNDZ(dst
, op
[0]));
1298 inst
->saturate
= instr
->dest
.saturate
;
   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }
   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;
   case nir_op_b2i:
   case nir_op_b2f:
      emit(MOV(dst, negate(op[0])));
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;
   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");
   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");
   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }
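   /* In effect (illustration only): dst.x = (src.y << 16) | (src.x & 0xffff),
    * i.e. the two 16-bit halves of a uvec2 packed into a single uint.
    */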
   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it accessed incorrect data producing
       * regressions in Piglit. We repeat the swizzle of the first component on the
       * rest of components to avoid regressions. In the vec4_visitor IR code path
       * this is not needed because the operand already has the correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;
   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
      emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
      break;
   case nir_op_ifind_msb: {
      vec4_builder bld = vec4_builder(this).at_end();
      src_reg src(dst);

      if (devinfo->gen < 7) {
         emit_find_msb_using_lzd(bld, dst, op[0], true);
      } else {
         emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

         /* FBH counts from the MSB side, while GLSL's findMSB() wants the
          * count from the LSB side.  If FBH didn't return an error
          * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
          * count into an LSB count.
          */
         bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);

         inst = bld.ADD(dst, src, brw_imm_d(31));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->src[0].negate = true;
      }
      break;
   }
   case nir_op_find_lsb: {
      vec4_builder bld = vec4_builder(this).at_end();

      if (devinfo->gen < 7) {
         dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);

         /* (x & -x) generates a value that consists of only the LSB of x.
          * For all powers of 2, findMSB(y) == findLSB(y).
          */
         src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
         src_reg negated_src = src;

         /* One must be negated, and the other must be non-negated.  It
          * doesn't matter which is which.
          */
         negated_src.negate = true;
         src.negate = false;

         bld.AND(temp, src, negated_src);
         emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
      } else {
         bld.FBL(dst, op[0]);
      }
      break;
   }
   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");
   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;
   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
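   /* Sanity check (illustration only): isign(7) -> ASR gives 0, the CMP
    * predicate is set since 7 > 0, and the predicated OR produces 1;
    * isign(-5) -> ASR gives -1 and the OR is skipped, leaving -1;
    * isign(0) -> ASR gives 0 and the OR is skipped, leaving 0.
    */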
   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;
   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_bcsel: {
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;
   }
   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}
void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}
enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}
static const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
                                  components, 1);
}
void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparator;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         shadow_comparator = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         if (!const_offset ||
             !brw_texture_offset(const_offset->i32,
                                 nir_tex_instr_src_size(instr, i),
                                 &constant_offset)) {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;
      }
      case nir_tex_src_texture_offset: {
         /* The highest texture which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->texture_array_size;
         uint32_t max_used = texture + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }
   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparator,
                lod, lod2, sample_index,
                constant_offset, offset_value, mcs,
                texture, texture_reg, sampler_reg);
}
void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(VGRF, alloc.allocate(1));
}

} /* namespace brw */