/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "glsl/ir_uniform.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   if (nir->num_inputs > 0)
      nir_setup_inputs();

   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_setup_system_values();

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}

void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID,
                                           glsl_type::int_type);
      break;

   default:
      break;
   }
}

static bool
setup_system_values_block(nir_block *block, void *void_visitor)
{
   vec4_visitor *v = (vec4_visitor *)void_visitor;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}

void
vec4_visitor::nir_setup_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = dst_reg();
   }

   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, setup_system_values_block, this);
   }
}

void
vec4_visitor::nir_setup_inputs()
{
   nir_inputs = ralloc_array(mem_ctx, src_reg, nir->num_inputs);
   for (unsigned i = 0; i < nir->num_inputs; i++) {
      nir_inputs[i] = src_reg();
   }

   nir_foreach_variable(var, &nir->inputs) {
      int offset = var->data.driver_location;
      unsigned size = type_size_vec4(var->type);
      for (unsigned i = 0; i < size; i++) {
         src_reg src = src_reg(ATTR, var->data.location + i, var->type);
         nir_inputs[offset + i] = src;
      }
   }
}

void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms;

   nir_foreach_variable(var, &nir->uniforms) {
      /* UBOs and atomics don't take up space in the uniform file */
      if (var->interface_type != NULL || var->type->contains_atomic())
         continue;

      if (type_size_vec4(var->type) > 0)
         uniform_size[var->data.driver_location] = type_size_vec4(var->type);
   }
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;

      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(array_elems));
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      dst_reg dst = dst_reg(VGRF, alloc.allocate(1));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}

src_reg
vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   }
   else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(nir_src src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(nir_src src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int, num_components);
}

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg = dst_reg(VGRF, alloc.allocate(1));
   reg.type = BRW_REGISTER_TYPE_D;

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if (instr->value.u[i] == instr->value.u[j]) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      emit(MOV(reg, brw_imm_d(instr->value.i[i])));

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}
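
/* Added walkthrough (not in the upstream file): for a load_const of
 * vec4(1.0, 1.0, 2.0, 2.0) the loop in nir_emit_load_const above finds that
 * components 0 and 1 share a value and emits MOV reg.xy, 1.0f, then does the
 * same for components 2 and 3 with MOV reg.zw, 2.0f: two MOVs instead of
 * four.
 */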

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   bool has_indirect = false;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      int offset = instr->const_index[0];
      src = nir_inputs[offset];

      if (has_indirect) {
         dest.reladdr = new(mem_ctx) src_reg(get_nir_src(instr->src[0],
                                                         BRW_REGISTER_TYPE_D,
                                                         1));
      }
      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      int varying = instr->const_index[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);
      dest = dst_reg(src);

      if (has_indirect) {
         dest.reladdr = new(mem_ctx) src_reg(get_nir_src(instr->src[1],
                                                         BRW_REGISTER_TYPE_D,
                                                         1));
      }
      output_reg[varying] = dest;
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }

   case nir_intrinsic_store_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      /* Offset */
      src_reg offset_reg = src_reg(this, glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[2], 1)));
      } else {
         const_offset_bytes = instr->const_index[0];
         emit(MOV(dst_reg(offset_reg), brw_imm_ud(const_offset_bytes)));
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[1];

      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require us to write a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require us to modify the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      int swizzle[4] = { 0, 0, 0, 0};
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in the
          * vector, so write the channels we accumulated until now, if any. Do
          * the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (!has_indirect) {
                     const_offset_bytes += 4 * skipped_channels;
                     offset_reg = brw_imm_ud(const_offset_bytes);
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the channels
                * we need to write and send the write message. This will write
                * num_channels consecutive dwords starting at offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just written
                * now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase skipped count */
            skipped_channels++;
         }
      }

      break;
   }
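
   /* Added walkthrough (not in the upstream file): for v.xyw = vec3(1, 2, 4)
    * the writemask is XYW. The loop above packs X and Y (swizzle = {0, 1}),
    * hits the disabled Z channel and flushes a first 2-dword untyped write at
    * the base offset. W is then packed on the final iteration and flushed as
    * a second 1-dword write, with the offset advanced by 4 * skipped_channels
    * bytes (the two dwords already written plus the skipped Z channel = 12).
    */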

   case nir_intrinsic_load_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      src_reg surf_index;
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u[0];
         surf_index = brw_imm_ud(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any SSBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      src_reg offset_reg = src_reg(this, glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         emit(MOV(dst_reg(offset_reg), get_nir_src(instr->src[1], 1)));
      } else {
         const_offset_bytes = instr->const_index[0];
         emit(MOV(dst_reg(offset_reg), brw_imm_ud(const_offset_bytes)));
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
         .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size */,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));

      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }

   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, instr->const_index[0]));
      src.reg_offset = instr->const_index[1];

      if (has_indirect) {
         src_reg tmp = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_D, 1);
         src.reladdr = new(mem_ctx) src_reg(tmp);
      }

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
         (unsigned) instr->const_index[0];
      src_reg offset = get_nir_src(instr->src[0], nir_type_int,
                                   instr->num_components);
      dest = get_nir_dest(instr->dest);

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }

      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }

   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                const_block_index->u[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info.num_ubos - 1);
      }

      unsigned const_offset = instr->const_index[0];
      src_reg offset;

      if (!has_indirect) {
         offset = brw_imm_ud(const_offset / 16);
      } else {
         offset = src_reg(this, glsl_type::uint_type);
         emit(SHR(dst_reg(offset), get_nir_src(instr->src[1], nir_type_int, 1),
                  brw_imm_ud(4u)));
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      packed_consts.swizzle += BRW_SWIZZLE4(const_offset % 16 / 4,
                                            const_offset % 16 / 4,
                                            const_offset % 16 / 4,
                                            const_offset % 16 / 4);

      emit(MOV(dest, packed_consts));
      break;
   }
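
   /* Added note (not in the upstream file): pull-constant loads always fetch
    * an aligned 16-byte vec4, so the message offset above is const_offset / 16
    * and the leftover bytes select a starting component via the swizzle.
    * E.g. a scalar load with const_offset == 24 fetches the vec4 at byte 16,
    * and 24 % 16 / 4 == 2 turns the XXXX swizzle into ZZZZ, replicating
    * component Z into every requested channel.
    */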

   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type =
         brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ssbo_start +
                            const_surface->u[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));

      /* Assume this may touch any SSBO. This is the same we do for other
       * UBO/SSBO accesses with a non-constant surface index.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result =
      surface_access::emit_untyped_atomic(bld, surface, offset,
                                          data1, data2,
                                          1 /* dims */, 1 /* rsize */,
                                          op,
                                          BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   dst_reg dst = get_nir_dest(instr->dest.dest,
                              nir_op_infos[instr->op].output_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src,
                          nir_op_infos[instr->op].input_types[i], 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fadd:
      /* fall through */
   case nir_op_iadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates in the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (value0 && value0->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }
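
   /* Added note (not in the upstream file): e.g. for "x * 100" the constant
    * fits in 16 bits, so the MUL/MACH/MOV accumulator sequence above
    * collapses into a single MUL, with the small operand placed in the
    * source slot whose low 16 bits the hardware multiplies on this gen.
    */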

   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      emit(MUL(acc, op[0], op[1]));
      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }
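
   /* Added note (not in the upstream file): ADDC and SUBB above write the
    * carry/borrow to the hardware accumulator as a side effect; the main
    * result goes to the null register and the accumulator is then copied
    * into the real destination.
    */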

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }
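
   /* Added note (not in the upstream file): fceil above uses the identity
    * ceil(x) = -floor(-x): the source negate is toggled, RNDD (floor) goes
    * to a temporary, and the temporary is negated on the final MOV.
    * E.g. ceil(1.2) = -floor(-1.2) = -(-2) = 2.
    */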

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }
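
   /* Added note (not in the upstream file): these lowerings rely on Align16
    * predication: the CMP sets per-channel flag bits, the first MOV
    * unconditionally writes false (0), and the predicated MOV of ~0 only
    * executes when ALL4H (every enabled channel) or ANY4H (at least one
    * channel) of the flag is set.
    */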

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      emit(MOV(dst, negate(op[0])));
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it would access incorrect data,
       * producing regressions in Piglit. We repeat the swizzle of the first
       * component on the rest of the components to avoid regressions. In the
       * vec4_visitor IR code path this is not needed because the operand
       * already has the correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
      src_reg src(dst);
      emit(CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ));

      inst = emit(ADD(dst, src, brw_imm_d(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->src[0].negate = true;
      break;
   }

   case nir_op_find_lsb:
      emit(FBL(dst, op[0]));
      break;
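
   /* Added walkthrough (not in the upstream file): findMSB(0x00000010):
    * FBH returns 27, the count from the MSB side, and the predicated
    * negate-and-add computes 31 - 27 = 4, the LSB-side index GLSL expects.
    * For an input of 0, FBH returns 0xffffffff, the CMP turns the predicate
    * off, and the -1 error value passes through unchanged.
    */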

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
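
   /* Added walkthrough (not in the upstream file): isign(-5): ASR gives
    * 0xffffffff (-1) and the "> 0" predicate is off, so -1 survives.
    * isign(7): ASR gives 0 and the predicated OR with 1 fires, yielding 1.
    * isign(0): ASR gives 0 and the predicate stays off, yielding 0.
    */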

   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      switch (dst.writemask) {
      case WRITEMASK_X:
         inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
         break;
      case WRITEMASK_Y:
         inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
         break;
      case WRITEMASK_Z:
         inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
         break;
      case WRITEMASK_W:
         inst->predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
         break;
      default:
         inst->predicate = BRW_PREDICATE_NORMAL;
         break;
      }
      break;
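
   /* Added note (not in the upstream file): in Align16 mode the SEL can only
    * be predicated on one replicated flag channel, so a single-channel
    * writemask selects the matching REPLICATE_<X..W> predicate and anything
    * wider falls back to the normal per-channel predicate.
    */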

   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), brw_imm_d(0),
               BRW_CONDITIONAL_NZ));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
   case nir_op_fsat:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}
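
/* Added walkthrough (not in the upstream file): the gen4/5 boolean resolve
 * above computes -(x & 1). A comparison result whose low bit is 1 becomes
 * -(1) = 0xffffffff (canonical true) regardless of any garbage in the upper
 * bits; a low bit of 0 stays 0 (false).
 */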

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}

enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   switch (alu_type) {
   case nir_type_float:
      return glsl_type::vec(components);
   case nir_type_int:
      return glsl_type::ivec(components);
   case nir_type_unsigned:
      return glsl_type::uvec(components);
   case nir_type_bool:
      return glsl_type::bvec(components);
   default:
      return glsl_type::error_type;
   }

   return glsl_type::error_type;
}

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparitor;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* When tg4 is used with the degenerate ZERO/ONE swizzles, don't bother
    * emitting anything other than setting up the constant result.
    */
   if (instr->op == nir_texop_tg4) {
      int swiz = GET_SWZ(key_tex->swizzles[sampler], instr->component);
      if (swiz == SWIZZLE_ZERO || swiz == SWIZZLE_ONE) {
         emit(MOV(dest, brw_imm_f(swiz == SWIZZLE_ONE ? 1.0f : 0.0f)));
         return;
      }
   }

   /* Load the texture operation sources */
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparitor:
         shadow_comparitor = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset:
         offset_value = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         break;

      case nir_tex_src_sampler_offset: {
         /* The highest sampler which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->sampler_array_size;
         uint32_t max_used = sampler + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, sampler_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         constant_offset = brw_texture_offset(instr->const_offset, 3);
         break;
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4)
      constant_offset |= gather_channel(instr->component, sampler) << 16;

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   bool is_cube_array =
      instr->op == nir_texop_txs &&
      instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
      instr->is_array;

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparitor,
                lod, lod2, sample_index,
                constant_offset, offset_value,
                mcs, is_cube_array, sampler, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(VGRF, alloc.allocate(1));
}

}