/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_program.h"

namespace brw {

using namespace brw::surface_access;
void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_setup_system_values();

   /* get the main function and emit it */
   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}
void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_instance:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_INSTANCE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_draw_id:
      reg = &nir_system_values[SYSTEM_VALUE_DRAW_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_DRAW_ID,
                                           glsl_type::int_type);
      break;

   default:
      break;
   }
}
static bool
setup_system_values_block(nir_block *block, void *void_visitor)
{
   vec4_visitor *v = (vec4_visitor *)void_visitor;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}
void
vec4_visitor::nir_setup_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = dst_reg();
   }

   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(function->impl, setup_system_values_block, this);
   }
}
void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;

   nir_foreach_variable(var, &nir->uniforms) {
      /* UBOs and atomics don't take up space in the uniform file */
      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (type_size_vec4(var->type) > 0)
         uniform_size[var->data.driver_location / 16] = type_size_vec4(var->type);
   }
}
void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;

      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(array_elems));
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line.
    */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}
static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}
dst_reg
vec4_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      dst_reg dst = dst_reg(VGRF, alloc.allocate(1));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}
src_reg
vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(nir_src src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(nir_src src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int, num_components);
}
src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);
   nir_const_value *const_value = nir_src_as_const_value(*offset_src);

   if (const_value) {
      /* The only constant offset we should find is 0. brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(const_value->u32[0] == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}
void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg = dst_reg(VGRF, alloc.allocate(1));
   reg.type = BRW_REGISTER_TYPE_D;

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if (instr->value.u32[i] == instr->value.u32[j]) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      emit(MOV(reg, brw_imm_d(instr->value.i32[i])));

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}
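/* For illustration: given a constant like vec4(1.0, 1.0, 2.0, 2.0), the
 * value-grouping loop above should emit just two MOVs -- one with
 * writemask .xy loading 1.0 and one with writemask .zw loading 2.0 --
 * rather than four scalar MOVs.
 */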
void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

      /* We set EmitNoIndirectInput for VS */
      assert(const_offset);

      src = src_reg(ATTR, instr->const_index[0] + const_offset->u32[0],
                    glsl_type::uvec4_type);

      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }
   case nir_intrinsic_store_output: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      assert(const_offset);

      int varying = instr->const_index[0] + const_offset->u32[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      output_reg[varying] = dst_reg(src);
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      /* Offset */
      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[2], 1);
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[0];
      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and, if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require writing a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require modifying the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      int swizzle[4] = { 0, 0, 0, 0 };
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array.
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in the
          * vector, so write the channels we accumulated until now, if any. Do
          * the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (offset_reg.file == IMM) {
                     offset_reg.ud += 4 * skipped_channels;
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the channels
                * we need to write and send the write message. This will write
                * num_channels consecutive dwords starting at offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just written
                * now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase the skipped
             * count.
             */
            skipped_channels++;
         }
      }

      break;
   }
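   /* For illustration: with write_mask = xyw, the loop above should first
    * emit a two-dword message for .xy at the base offset, then, after the
    * gap at .z, a second one-dword message for .w at base offset + 12
    * (three channels -- two written plus one skipped -- times 4 bytes).
    */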
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      src_reg surf_index;
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u32[0];
         surf_index = brw_imm_ud(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u32[0]);
      } else {
         offset_reg = get_nir_src(instr->src[1], 1);
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
         .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size */,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));

      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }
   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 16 */
      assert(instr->const_index[0] % 16 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, instr->const_index[0] / 16));
      src.type = dest.type;

      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
      if (const_offset) {
         /* Offsets are in bytes but they should always be multiples of 16 */
         assert(const_offset->u32[0] % 16 == 0);
         src.reg_offset = const_offset->u32[0] / 16;
      } else {
         src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1);
         src.reladdr = new(mem_ctx) src_reg(tmp);
      }

      emit(MOV(dest, src));
      break;
   }
   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      src_reg offset = get_nir_src(instr->src[0], nir_type_int,
                                   instr->num_components);
      const src_reg surface = brw_imm_ud(surf_index);
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      src_reg tmp;

      dest = get_nir_dest(instr->dest);

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         tmp = emit_untyped_atomic(bld, surface, offset,
                                   src_reg(), src_reg(),
                                   1, 1,
                                   BRW_AOP_INC);
         break;
      case nir_intrinsic_atomic_counter_dec:
         tmp = emit_untyped_atomic(bld, surface, offset,
                                   src_reg(), src_reg(),
                                   1, 1,
                                   BRW_AOP_PREDEC);
         break;
      case nir_intrinsic_atomic_counter_read:
         tmp = emit_untyped_read(bld, surface, offset, 1, 1);
         break;
      default:
         unreachable("Unreachable");
      }

      bld.MOV(retype(dest, tmp.type), tmp);
      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }
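   /* For illustration: a GLSL atomicCounterIncrement() should land in the
    * _inc case above and become a single untyped atomic with no data
    * operands, while a plain atomicCounter() read is just a one-dword
    * untyped read from the counter's buffer location.
    */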
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                const_block_index->u32[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info.num_ubos - 1);
      }

      src_reg offset;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset = brw_imm_ud(const_offset->u32[0] & ~15);
      } else {
         offset = get_nir_src(instr->src[1], nir_type_int, 1);
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (const_offset) {
         packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4,
                                               const_offset->u32[0] % 16 / 4);
      }

      emit(MOV(dest, packed_consts));
      break;
   }
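   /* For illustration: a constant byte offset of 4 within a 16-byte vec4
    * slot gives const_offset->u32[0] % 16 / 4 == 1, so the swizzle
    * adjustment above should turn a scalar XXXX read into YYYY -- the
    * value lives in the second dword of the pulled vec4.
    */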
   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }
   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}
void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ssbo_start +
                            const_surface->u32[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));

      /* Assume this may touch any UBO. This is the same we do for other
       * UBO/SSBO accesses with non-constant surface.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}
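/* For illustration: for nir_intrinsic_ssbo_atomic_comp_swap (BRW_AOP_CMPWR),
 * data1 carries the comparison value and data2 the value to be written on a
 * match, which is why data2 is only fetched for that opcode above.
 */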
static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}
static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      op[i] = get_nir_src(cmp_instr->src[i].src,
                          nir_op_infos[cmp_instr->op].input_types[i], 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_conditional_for_nir_comparison(cmp_instr->op)));

   return true;
}
void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   dst_reg dst = get_nir_dest(instr->dest.dest,
                              nir_op_infos[instr->op].output_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src,
                          nir_op_infos[instr->op].input_types[i], 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fadd:
      /* fall through */
   case nir_op_iadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (value0 && value0->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u32[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }
   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fsin:
      if (!compiler->precise_trig) {
         inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      } else {
         src_reg tmp = src_reg(this, glsl_type::vec4_type);
         inst = emit_math(SHADER_OPCODE_SIN, dst_reg(tmp), op[0]);
         inst = emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      if (!compiler->precise_trig) {
         inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      } else {
         src_reg tmp = src_reg(this, glsl_type::vec4_type);
         inst = emit_math(SHADER_OPCODE_COS, dst_reg(tmp), op[0]);
         inst = emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }
   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      /* ceil(x) is implemented here as -floor(-x) */
      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }
   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      emit(MOV(dst, negate(op[0])));
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;
   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");
   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }
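   /* For illustration: packing uvec2(0x1234, 0xabcd) this way should yield
    * (0xabcd << 16) | (0x1234 & 0xffff) == 0xabcd1234 -- the .y component in
    * the high 16 bits and .x in the low 16.
    */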
   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it accessed incorrect data, producing
       * regressions in Piglit. We repeat the swizzle of the first component on
       * the rest of the components to avoid regressions. In the vec4_visitor
       * IR code path this is not needed because the operand already has the
       * correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;
   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
      src_reg src(dst);
      emit(CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ));

      inst = emit(ADD(dst, src, brw_imm_d(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->src[0].negate = true;
      break;
   }
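   /* Worked example: findMSB(0x00000100) -> FBH returns 23 (counting down
    * from bit 31); the predicated ADD with the negated source then yields
    * 31 - 23 = 8, the LSB-relative answer GLSL expects.
    */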
   case nir_op_find_lsb:
      emit(FBL(dst, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");
   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
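   /* Worked example: isign(-5) -> ASR gives 0xffffffff (-1) and the "> 0"
    * predicate fails, so the OR is skipped and the result stays -1;
    * isign(7) -> ASR gives 0, the predicate passes, and the OR writes 1;
    * isign(0) likewise keeps 0.
    */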
   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_bcsel: {
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;
   }
   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }
   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0.
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}
void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}
enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}
const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   switch (alu_type) {
   case nir_type_float:
      return glsl_type::vec(components);
   case nir_type_int:
      return glsl_type::ivec(components);
   case nir_type_uint:
      return glsl_type::uvec(components);
   case nir_type_bool:
      return glsl_type::bvec(components);
   default:
      return glsl_type::error_type;
   }

   return glsl_type::error_type;
}
void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparitor;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparitor:
         shadow_comparitor = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         if (const_offset) {
            constant_offset = brw_texture_offset(const_offset->i32, 3);
         } else {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;
      }

      case nir_tex_src_texture_offset: {
         /* The highest texture which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->texture_array_size;
         uint32_t max_used = texture + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   bool is_cube_array =
      instr->op == nir_texop_txs &&
      instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
      instr->is_array;

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparitor,
                lod, lod2, sample_index,
                constant_offset, offset_value,
                mcs, is_cube_array,
                texture, texture_reg, sampler, sampler_reg);
}
void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(VGRF, alloc.allocate(1));
}

} /* namespace brw */