/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {
void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
}
void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;
}
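
/* Allocates vec4 storage for every NIR register and SSA value in the
 * function implementation, then emits its control-flow list.  64-bit
 * registers are retyped to DF and take twice the VGRF space.
 */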
void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));

      if (reg->bit_size == 64)
         nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}
void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}
void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}
void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      unreachable("VS instruction not yet implemented by NIR->vec4");
   }
}
static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   if (nir_reg->bit_size == 64)
      reg.type = BRW_REGISTER_TYPE_DF;
   reg = offset(reg, 8, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));
   }
   return reg;
}
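
/* Returns the vec4 destination for a NIR dest.  For SSA destinations a fresh
 * VGRF is allocated (two registers for 64-bit values) and recorded in
 * nir_ssa_values; for NIR registers the previously allocated storage is
 * looked up through dst_reg_for_nir_reg().
 */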
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      dst_reg dst =
         dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
      if (dest.ssa.bit_size == 64)
         dst.type = BRW_REGISTER_TYPE_DF;
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(devinfo, type));
}
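
/* The get_nir_src() overloads below return a source register for a NIR src,
 * retyped as requested and with a swizzle covering num_components channels.
 */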
src_reg
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}
src_reg
vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(devinfo, type),
                      num_components);
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int32, num_components);
}
src_reg
vec4_visitor::get_nir_src_imm(const nir_src &src)
{
   assert(nir_src_num_components(src) == 1);
   assert(nir_src_bit_size(src) == 32);
   return nir_src_is_const(src) ? src_reg(brw_imm_d(nir_src_as_int(src))) :
                                  get_nir_src(src, 1);
}
src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);

   if (nir_src_is_const(*offset_src)) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(nir_src_as_uint(*offset_src) == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}
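
/* Builds a 64-bit float immediate.  Gen8+ supports DF immediates natively,
 * Haswell can use the DIM instruction, and gen7 assembles the constant from
 * two 32-bit writes and reads it back through an XXXX swizzle.
 */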
static src_reg
setup_imm_df(const vec4_builder &bld, double v)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8)
      return brw_imm_df(v);

   /* gen7.5 does not support DF immediates straightforwardly but the DIM
    * instruction allows us to set the 64-bit immediate value.
    */
   if (devinfo->is_haswell) {
      const vec4_builder ubld = bld.exec_all();
      const dst_reg dst = bld.vgrf(BRW_REGISTER_TYPE_DF);
      ubld.DIM(dst, brw_imm_df(v));
      return swizzle(src_reg(dst), BRW_SWIZZLE_XXXX);
   }

   /* gen7 does not support DF immediates */
   union {
      double d;
      struct {
         uint32_t i1;
         uint32_t i2;
      };
   } di;

   di.d = v;

   /* Write the low 32-bit of the constant to the X:UD channel and the
    * high 32-bit to the Y:UD channel to build the constant in a VGRF.
    * We have to do this twice (offset 0 and offset 1), since a DF VGRF takes
    * two SIMD8 registers in SIMD4x2 execution. Finally, return a swizzle
    * XXXX so any access to the VGRF only reads the constant data in these
    * channels.
    */
   const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
   for (unsigned n = 0; n < 2; n++) {
      const vec4_builder ubld = bld.exec_all().group(4, n);
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_X), brw_imm_ud(di.i1));
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_Y), brw_imm_ud(di.i2));
   }

   return swizzle(src_reg(retype(tmp, BRW_REGISTER_TYPE_DF)), BRW_SWIZZLE_XXXX);
}
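
/* Emits MOVs of immediate data for a NIR load_const instruction, grouping
 * components that share the same value into a single writemasked MOV.
 */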
void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg;

   if (instr->def.bit_size == 64) {
      reg = dst_reg(VGRF, alloc.allocate(2));
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      reg = dst_reg(VGRF, alloc.allocate(1));
      reg.type = BRW_REGISTER_TYPE_D;
   }

   const vec4_builder ibld = vec4_builder(this).at_end();
   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if ((instr->def.bit_size == 32 &&
              instr->value.u32[i] == instr->value.u32[j]) ||
             (instr->def.bit_size == 64 &&
              instr->value.f64[i] == instr->value.f64[j])) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      if (instr->def.bit_size == 64) {
         emit(MOV(reg, setup_imm_df(ibld, instr->value.f64[i])));
      } else {
         emit(MOV(reg, brw_imm_d(instr->value.i32[i])));
      }

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}
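
/* Computes the binding-table surface index for an SSBO intrinsic, either as
 * an immediate for constant block indices or by uniformizing a dynamically
 * computed per-channel index.
 */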
src_reg
vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
{
   /* SSBO stores are weird in that their index is in src[1] */
   const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;

   src_reg surf_index;
   if (nir_src_is_const(instr->src[src])) {
      unsigned index = prog_data->base.binding_table.ssbo_start +
                       nir_src_as_uint(instr->src[src]);
      surf_index = brw_imm_ud(index);
      brw_mark_surface_used(&prog_data->base, index);
   } else {
      surf_index = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
      surf_index = emit_uniformize(surf_index);

      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   return surf_index;
}
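
/* Main dispatch for NIR intrinsics in the vec4 backend. */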
void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      /* We set EmitNoIndirectInput for VS */
      unsigned load_offset = nir_src_as_uint(instr->src[0]);

      dest = get_nir_dest(instr->dest);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      src = src_reg(ATTR, instr->const_index[0] + load_offset,
                    glsl_type::uvec4_type);
      src = retype(src, dest.type);

      bool is_64bit = nir_dest_bit_size(instr->dest) == 64;
      if (is_64bit) {
         dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
         src.swizzle = BRW_SWIZZLE_XYZW;
         shuffle_64bit_data(tmp, src, false);
         emit(MOV(dest, src_reg(tmp)));
      } else {
         /* Swizzle source based on component layout qualifier */
         src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
         emit(MOV(dest, src));
      }
      break;
   }
   case nir_intrinsic_store_output: {
      unsigned store_offset = nir_src_as_uint(instr->src[1]);
      int varying = instr->const_index[0] + store_offset;

      bool is_64bit = nir_src_bit_size(instr->src[0]) == 64;
      if (is_64bit) {
         src_reg data;
         src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_DF,
                           instr->num_components);
         data = src_reg(this, glsl_type::dvec4_type);
         shuffle_64bit_data(dst_reg(data), src, true);
         src = retype(data, BRW_REGISTER_TYPE_F);
      } else {
         src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                           instr->num_components);
      }

      unsigned c = nir_intrinsic_component(instr);
      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = instr->num_components;

      unsigned num_components = instr->num_components;
      if (is_64bit)
         num_components *= 2;

      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = MIN2(4, num_components);

      if (is_64bit && num_components > 4) {
         assert(num_components <= 8);
         output_reg[varying + 1][c] = byte_offset(dst_reg(src), REG_SIZE);
         output_num_components[varying + 1][c] = num_components - 4;
      }
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
                            nir_src_as_uint(instr->src[0]) : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_src_bit_size(instr->src[0]) == 32);
      assert(nir_intrinsic_write_mask(instr) ==
             (1u << instr->num_components) - 1);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
                                  BRW_REGISTER_TYPE_UD);

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);

      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we have decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require to write a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require to modify the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                         1 /* dims */, instr->num_components /* size */,
                         BRW_PREDICATE_NONE);
      break;
   }
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_dest_bit_size(instr->dest) == 32);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
                                  BRW_REGISTER_TYPE_UD);

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size*/,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));
      break;
   }
   case nir_intrinsic_ssbo_atomic_add: {
      int op = BRW_AOP_ADD;

      if (nir_src_is_const(instr->src[2])) {
         int add_val = nir_src_as_int(instr->src[2]);
         if (add_val == 1)
            op = BRW_AOP_INC;
         else if (add_val == -1)
            op = BRW_AOP_DEC;
      }

      nir_emit_ssbo_atomic(op, instr);
      break;
   }
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
      unreachable("should be lowered by brw_nir_lower_vs_inputs()");
   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 4 */
      assert(nir_intrinsic_base(instr) % 4 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
      src.type = dest.type;

      /* Uniforms don't actually have to be vec4 aligned.  In the case that
       * it isn't, we have to use a swizzle to shift things around.  They
       * do still have the std140 alignment requirement that vec2's have to
       * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
       *
       * The swizzle also works in the indirect case as the generator adds
       * the swizzle to the offset for us.
       */
      const int type_size = type_sz(src.type);
      unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
      assert(shift + instr->num_components <= 4);

      if (nir_src_is_const(instr->src[0])) {
         const unsigned load_offset = nir_src_as_uint(instr->src[0]);
         /* Offsets are in bytes but they should always be multiples of 4 */
         assert(load_offset % 4 == 0);

         src.swizzle = brw_swizzle_for_size(instr->num_components);
         dest.writemask = brw_writemask_for_size(instr->num_components);
         unsigned offset = load_offset + shift * type_size;
         src.offset = ROUND_DOWN_TO(offset, 16);
         shift = (offset % 16) / type_size;
         assert(shift + instr->num_components <= 4);
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         emit(MOV(dest, src));
      } else {
         /* Uniform arrays are vec4 aligned, because of std140 alignment
          * rules.
          */
         assert(shift == 0);

         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         /* MOV_INDIRECT is going to stomp the whole thing anyway */
         dest.writemask = WRITEMASK_XYZW;

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }
   case nir_intrinsic_load_ubo: {
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (nir_src_is_const(instr->src[0])) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                nir_src_as_uint(instr->src[0]);
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int32,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info.num_ubos - 1);
      }

      src_reg offset_reg;
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         offset_reg = brw_imm_ud(load_offset & ~15);
      } else {
         offset_reg = src_reg(this, glsl_type::uint_type);
         emit(MOV(dst_reg(offset_reg),
                  get_nir_src(instr->src[1], nir_type_uint32, 1)));
      }

      src_reg packed_consts;
      if (nir_dest_bit_size(instr->dest) == 32) {
         packed_consts = src_reg(this, glsl_type::vec4_type);
         emit_pull_constant_load_reg(dst_reg(packed_consts),
                                     surf_index,
                                     offset_reg,
                                     NULL, NULL /* before_block/inst */);
      } else {
         src_reg temp = src_reg(this, glsl_type::dvec4_type);
         src_reg temp_float = retype(temp, BRW_REGISTER_TYPE_F);

         emit_pull_constant_load_reg(dst_reg(temp_float),
                                     surf_index, offset_reg, NULL, NULL);
         if (offset_reg.file == IMM)
            offset_reg.ud += 16;
         else
            emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(16u)));
         emit_pull_constant_load_reg(dst_reg(byte_offset(temp_float, REG_SIZE)),
                                     surf_index, offset_reg, NULL, NULL);

         packed_consts = src_reg(this, glsl_type::dvec4_type);
         shuffle_64bit_data(dst_reg(packed_consts), temp, false);
      }

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         unsigned type_size = type_sz(dest.type);
         packed_consts.swizzle +=
            BRW_SWIZZLE4(load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size);
      }

      emit(MOV(dest, retype(packed_consts, dest.type)));
      break;
   }
   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->size_written = 2 * REG_SIZE;
      break;
   }
   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}
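
/* Common path for all SSBO atomics: gathers the surface, offset and data
 * operands and emits an untyped atomic message, moving the result (if any)
 * into the NIR destination.
 */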
void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface = get_nir_ssbo_intrinsic_index(instr);
   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}
static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}
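
/* Maps a NIR comparison opcode to the hardware conditional modifier used by
 * CMP: ordered less-than maps to .l, greater-or-equal to .ge, (all-)equal to
 * .z and (any-)not-equal to .nz.
 */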
static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}
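
/* Tries to fold a vector comparison feeding a bcsel directly into the SEL's
 * predicate using the Align16 ANY4H/ALL4H predication modes, avoiding the
 * intermediate boolean.  Returns false if the source is not a foldable
 * comparison.
 */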
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      nir_alu_type type = nir_op_infos[cmp_instr->op].input_types[i];
      unsigned bit_size = nir_src_bit_size(cmp_instr->src[i].src);
      type = (nir_alu_type) (((unsigned) type) | bit_size);
      op[i] = get_nir_src(cmp_instr->src[i].src, type, 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_conditional_for_nir_comparison(cmp_instr->op)));

   return true;
}
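
/* Implements findMSB() on hardware without the FBH instruction by using LZD
 * (leading-zero detect) and post-processing the result.
 */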
static void
emit_find_msb_using_lzd(const vec4_builder &bld,
                        const dst_reg &dst,
                        const src_reg &src,
                        bool is_signed)
{
   vec4_instruction *inst;
   src_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *    For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));

      bld.ASR(dst_reg(temp), src, brw_imm_d(31));
      bld.XOR(dst_reg(temp), temp, src);
   }

   bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB count
    * into an LSB count.  If no bits are set, LZD will return 32.  31-32 = -1,
    * which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
                  brw_imm_d(31));
   inst->src[0].negate = true;
}
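
/* The two helpers below emit the DF <-> 32-bit conversion sequences,
 * including the extra PICK_LOW_32BIT/TO_DOUBLE shuffling that Align16
 * execution requires and the BDW immediate-source workaround.
 */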
void
vec4_visitor::emit_conversion_from_double(dst_reg dst, src_reg src,
                                          bool saturate)
{
   /* BDW PRM vol 15 - workarounds:
    * DF->f format conversion for Align16 has wrong emask calculation when
    * source is immediate.
    */
   if (devinfo->gen == 8 && dst.type == BRW_REGISTER_TYPE_F &&
       src.file == BRW_IMMEDIATE_VALUE) {
      vec4_instruction *inst = emit(MOV(dst, brw_imm_f(src.df)));
      inst->saturate = saturate;
      return;
   }

   enum opcode op;
   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      op = VEC4_OPCODE_DOUBLE_TO_D32;
      break;
   case BRW_REGISTER_TYPE_UD:
      op = VEC4_OPCODE_DOUBLE_TO_U32;
      break;
   case BRW_REGISTER_TYPE_F:
      op = VEC4_OPCODE_DOUBLE_TO_F32;
      break;
   default:
      unreachable("Unknown conversion");
   }

   dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
   emit(MOV(temp, src));
   dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
   emit(op, temp2, src_reg(temp));

   emit(VEC4_OPCODE_PICK_LOW_32BIT, retype(temp2, dst.type), src_reg(temp2));
   vec4_instruction *inst = emit(MOV(dst, src_reg(retype(temp2, dst.type))));
   inst->saturate = saturate;
}
void
vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src,
                                        bool saturate)
{
   dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
   src_reg tmp_src = retype(src_reg(this, glsl_type::vec4_type), src.type);
   emit(MOV(dst_reg(tmp_src), src));
   emit(VEC4_OPCODE_TO_DOUBLE, tmp_dst, tmp_src);
   vec4_instruction *inst = emit(MOV(dst, src_reg(tmp_dst)));
   inst->saturate = saturate;
}
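
/* Main dispatch for NIR ALU instructions.  Sources are fetched with a full
 * vec4 swizzle; the destination writemask comes straight from NIR.
 */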
void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
                                           nir_dest_bit_size(instr->dest.dest));
   dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = (nir_alu_type)
         (nir_op_infos[instr->op].input_types[i] |
          nir_src_bit_size(instr->src[i].src));
      op[i] = get_nir_src(instr->src[i].src, src_type, 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }
->op
) {
1050 inst
= emit(MOV(dst
, op
[0]));
1051 inst
->saturate
= instr
->dest
.saturate
;
1057 unreachable("not reached: should be handled by lower_vec_to_movs()");
1061 inst
= emit(MOV(dst
, op
[0]));
1062 inst
->saturate
= instr
->dest
.saturate
;
1068 if (nir_src_bit_size(instr
->src
[0].src
) == 64)
1069 emit_conversion_from_double(dst
, op
[0], instr
->dest
.saturate
);
1071 inst
= emit(MOV(dst
, op
[0]));
1077 emit_conversion_to_double(dst
, op
[0], instr
->dest
.saturate
);
1081 assert(nir_dest_bit_size(instr
->dest
.dest
) < 64);
1084 inst
= emit(ADD(dst
, op
[0], op
[1]));
1085 inst
->saturate
= instr
->dest
.saturate
;
1088 case nir_op_uadd_sat
:
1089 assert(nir_dest_bit_size(instr
->dest
.dest
) < 64);
1090 inst
= emit(ADD(dst
, op
[0], op
[1]));
1091 inst
->saturate
= true;
1095 inst
= emit(MUL(dst
, op
[0], op
[1]));
1096 inst
->saturate
= instr
->dest
.saturate
;
   case nir_op_imul: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen < 8) {
         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates in the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (nir_src_is_const(instr->src[0].src) &&
             nir_alu_instr_src_read_mask(instr, 0) == 1 &&
             nir_src_comp_as_uint(instr->src[0].src, 0) < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (nir_src_is_const(instr->src[1].src) &&
                    nir_alu_instr_src_read_mask(instr, 1) == 1 &&
                    nir_src_comp_as_uint(instr->src[1].src, 0) < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }
   case nir_op_imul_high:
   case nir_op_umul_high: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");
   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_uadd_carry: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_imin:
   case nir_op_umin:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imax:
   case nir_op_umax:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne: {
      enum brw_conditional_mod conditional_mod =
         brw_conditional_for_nir_comparison(instr->op);

      if (nir_src_bit_size(instr->src[0].src) < 64) {
         emit(CMP(dst, op[0], op[1], conditional_mod));
      } else {
         /* Produce a 32-bit boolean result from the DF comparison by selecting
          * only the low 32-bit in each DF produced. Do this in a temporary
          * so we can then move from there to the result using align16 again
          * to honor the original writemask.
          */
         dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
         emit(CMP(temp, op[0], op[1], conditional_mod));
         dst_reg result = dst_reg(this, glsl_type::bvec4_type);
         emit(VEC4_OPCODE_PICK_LOW_32BIT, result, src_reg(temp));
         emit(MOV(dst, src_reg(result)));
      }
      break;
   }
   case nir_op_ball_iequal2:
   case nir_op_ball_iequal3:
   case nir_op_ball_iequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_ball_fequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_fequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_bany_inequal2:
   case nir_op_bany_inequal3:
   case nir_op_bany_inequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_bany_fnequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_fnequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }
   case nir_op_inot:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;
   case nir_op_b2i:
   case nir_op_b2f:
      if (nir_dest_bit_size(instr->dest.dest) > 32) {
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         emit_conversion_to_double(dst, negate(op[0]), false);
      } else {
         emit(MOV(dst, negate(op[0])));
      }
      break;

   case nir_op_f2b:
      if (nir_src_bit_size(instr->src[0].src) == 64) {
         /* We use a MOV with conditional_mod to check if the provided value is
          * 0.0. We want this to flush denormalized numbers to zero, so we set a
          * source modifier on the source operand to trigger this, as source
          * modifiers don't affect the result of the testing against 0.0.
          */
         src_reg value = op[0];
         value.abs = true;
         vec4_instruction *inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         src_reg one = src_reg(this, glsl_type::ivec4_type);
         emit(MOV(dst_reg(one), brw_imm_d(~0)));
         inst = emit(BRW_OPCODE_SEL, dst, one, brw_imm_d(0));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else {
         emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      }
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;
   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }
   case nir_op_pack_64_2x32_split: {
      dst_reg result = dst_reg(this, glsl_type::dvec4_type);
      dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
      emit(MOV(tmp, retype(op[0], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_LOW_32BIT, result, src_reg(tmp));
      emit(MOV(tmp, retype(op[1], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_HIGH_32BIT, result, src_reg(tmp));
      emit(MOV(dst, src_reg(result)));
      break;
   }

   case nir_op_unpack_64_2x32_split_x:
   case nir_op_unpack_64_2x32_split_y: {
      enum opcode oper = (instr->op == nir_op_unpack_64_2x32_split_x) ?
         VEC4_OPCODE_PICK_LOW_32BIT : VEC4_OPCODE_PICK_HIGH_32BIT;
      dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
      emit(MOV(tmp, op[0]));
      dst_reg tmp2 = dst_reg(this, glsl_type::uvec4_type);
      emit(oper, tmp2, src_reg(tmp));
      emit(MOV(dst, src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it accessed incorrect data producing
       * regressions in Piglit. We repeat the swizzle of the first component on the
       * rest of components to avoid regressions. In the vec4_visitor IR code path
       * this is not needed because the operand has already the correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_pack_snorm_4x8(dst, op[0]);
      break;
   case nir_op_bitfield_reverse:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
      break;

   case nir_op_ifind_msb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      vec4_builder bld = vec4_builder(this).at_end();
      src_reg src(dst);

      if (devinfo->gen < 7) {
         emit_find_msb_using_lzd(bld, dst, op[0], true);
      } else {
         emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

         /* FBH counts from the MSB side, while GLSL's findMSB() wants the
          * count from the LSB side. If FBH didn't return an error
          * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
          * count into an LSB count.
          */
         bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);

         inst = bld.ADD(dst, src, brw_imm_d(31));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->src[0].negate = true;
      }
      break;
   }

   case nir_op_find_lsb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      vec4_builder bld = vec4_builder(this).at_end();

      if (devinfo->gen < 7) {
         dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);

         /* (x & -x) generates a value that consists of only the LSB of x.
          * For all powers of 2, findMSB(y) == findLSB(y).
          */
         src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
         src_reg negated_src = src;

         /* One must be negated, and the other must be non-negated.  It
          * doesn't matter which is which.
          */
         negated_src.negate = true;
         src.negate = false;

         bld.AND(temp, src, negated_src);
         emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
      } else {
         bld.FBL(dst, op[0]);
      }
      break;
   }
   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");
   case nir_op_fsign:
      assert(!instr->dest.saturate);
      if (op[0].abs) {
         /* Straightforward since the source can be assumed to be either
          * strictly >= 0 or strictly <= 0 depending on the setting of the
          * negate flag.
          */
         inst = emit(MOV(dst, op[0]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         inst = (op[0].negate)
            ? emit(MOV(dst, brw_imm_f(-1.0f)))
            : emit(MOV(dst, brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
          * zero.
          */
         emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

         op[0].type = BRW_REGISTER_TYPE_UD;
         dst.type = BRW_REGISTER_TYPE_UD;
         emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

         inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         dst.type = BRW_REGISTER_TYPE_F;
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - We use a MOV with conditional_mod instead of a CMP so that we can
          *   skip loading a 0.0 immediate. We use a source modifier on the
          *   source of the MOV so that we flush denormalized values to 0.
          *   Since we want to compare against 0, this won't alter the result.
          * - We need to extract the high 32-bit of each DF where the sign
          *   is stored.
          * - We need to produce a DF result.
          */

         /* Check for zero */
         src_reg value = op[0];
         value.abs = true;
         inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         /* AND each high 32-bit channel with 0x80000000u */
         dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
         emit(VEC4_OPCODE_PICK_HIGH_32BIT, tmp, op[0]);
         emit(AND(tmp, src_reg(tmp), brw_imm_ud(0x80000000u)));

         /* Add 1.0 to each channel, predicated to skip the cases where the
          * channel's value was 0
          */
         inst = emit(OR(tmp, src_reg(tmp), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;

         /* Now convert the result from float to double */
         emit_conversion_to_double(dst, retype(src_reg(tmp),
                                               BRW_REGISTER_TYPE_F),
                                   false);
      }
      break;
   case nir_op_isign:
      /*  ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       *  Predicated OR sets 1 if val is positive.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_ishl:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(SHR(dst, op[0], op[1]));
      break;
   case nir_op_ffma:
      if (type_sz(dst.type) == 8) {
         dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
         emit(MUL(mul_dst, op[1], op[0]));
         inst = emit(ADD(dst, src_reg(mul_dst), op[2]));
         inst->saturate = instr->dest.saturate;
      } else {
         op[0] = fix_3src_operand(op[0]);
         op[1] = fix_3src_operand(op[1]);
         op[2] = fix_3src_operand(op[2]);

         inst = emit(MAD(dst, op[2], op[1], op[0]));
         inst->saturate = instr->dest.saturate;
      }
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel: {
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;
   }
   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_iabs:
   case nir_op_ineg:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fabs:
   case nir_op_fneg:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }
   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}
void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}
static enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}
static const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
                                  components, 1);
}
void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparator;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         shadow_comparator = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         assert(nir_src_bit_size(instr->src[i].src) == 32);
         if (!const_offset ||
             !brw_texture_offset(const_offset->i32,
                                 nir_tex_instr_src_size(instr, i),
                                 &constant_offset)) {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;
      }

      case nir_tex_src_texture_offset: {
         /* The highest texture which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->texture_array_size;
         uint32_t max_used = texture + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparator,
                lod, lod2, sample_index,
                constant_offset, offset_value, mcs,
                texture, texture_reg, sampler_reg);
}
void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] =
      dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
}
/* SIMD4x2 64bit data is stored in register space like this:
 *
 * r0.0:DF  x0 y0 z0 w0
 * r1.0:DF  x1 y1 z1 w1
 *
 * When we need to write data such as this to memory using 32-bit write
 * messages we need to shuffle it in this fashion:
 *
 * r0.0:DF  x0 y0 x1 y1 (to be written at base offset)
 * r1.0:DF  z0 w0 z1 w1 (to be written at base offset + 16)
 *
 * We need to do the inverse operation when we read using 32-bit messages,
 * which we can do by applying the same exact shuffling on the 64-bit data
 * read, only that because the data for each vertex is positioned differently
 * we need to apply different channel enables.
 *
 * This function takes 64bit data and shuffles it as explained above.
 *
 * The @for_write parameter is used to specify if the shuffling is being done
 * for proper SIMD4x2 64-bit data that needs to be shuffled prior to a 32-bit
 * write message (for_write = true), or instead we are doing the inverse
 * operation and we have just read 64-bit data using 32-bit messages that we
 * need to shuffle to create valid SIMD4x2 64-bit data (for_write = false).
 *
 * If @block and @ref are non-NULL, then the shuffling is done after @ref,
 * otherwise the instructions are emitted normally at the end. The function
 * returns the last instruction inserted.
 *
 * Notice that @src and @dst cannot be the same register.
 */
vec4_instruction *
vec4_visitor::shuffle_64bit_data(dst_reg dst, src_reg src, bool for_write,
                                 bblock_t *block, vec4_instruction *ref)
{
   assert(type_sz(src.type) == 8);
   assert(type_sz(dst.type) == 8);
   assert(!regions_overlap(dst, 2 * REG_SIZE, src, 2 * REG_SIZE));
   assert(!ref == !block);

   const vec4_builder bld = !ref ? vec4_builder(this).at_end() :
                                   vec4_builder(this).at(block, ref->next);

   /* Resolve swizzle in src */
   vec4_instruction *inst;
   if (src.swizzle != BRW_SWIZZLE_XYZW) {
      dst_reg data = dst_reg(this, glsl_type::dvec4_type);
      inst = bld.MOV(data, src);
      src = src_reg(data);
   }

   /* dst+0.XY = src+0.XY */
   inst = bld.group(4, 0).MOV(writemask(dst, WRITEMASK_XY), src);

   /* dst+0.ZW = src+1.XY */
   inst = bld.group(4, for_write ? 1 : 0)
             .MOV(writemask(dst, WRITEMASK_ZW),
                  swizzle(byte_offset(src, REG_SIZE), BRW_SWIZZLE_XYXY));

   /* dst+1.XY = src+0.ZW */
   inst = bld.group(4, for_write ? 0 : 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_XY),
                  swizzle(src, BRW_SWIZZLE_ZWZW));

   /* dst+1.ZW = src+1.ZW */
   inst = bld.group(4, 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_ZW),
                  byte_offset(src, REG_SIZE));

   return inst;
}

} /* namespace brw */