/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"

using namespace brw;
using namespace brw::surface_access;

namespace brw {

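/* Top-level entry point for NIR translation: set up the uniform count and
 * then walk the control-flow list of the shader's entry-point function.
 */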
void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
}

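/* nir->num_uniforms is in bytes at this point, while the vec4 backend counts
 * uniforms in 16-byte vec4 slots, hence the division by 16.
 */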
void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      const unsigned num_regs = array_elems * DIV_ROUND_UP(reg->bit_size, 32);
      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(num_regs));

      if (reg->bit_size == 64)
         nir_locals[reg->index].type = BRW_REGISTER_TYPE_DF;
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      unreachable("VS instruction not yet implemented by NIR->vec4");
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   if (nir_reg->bit_size == 64)
      reg.type = BRW_REGISTER_TYPE_DF;
   reg = offset(reg, 8, base_offset);
   if (indirect) {
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D, 1));
   }
   return reg;
}

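/* For SSA destinations get_nir_dest() allocates a fresh VGRF on first use and
 * caches it in nir_ssa_values[], so later reads of the same SSA def through
 * get_nir_src() resolve to the same register.
 */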
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      dst_reg dst =
         dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(dest.ssa.bit_size, 32)));
      if (dest.ssa.bit_size == 64)
         dst.type = BRW_REGISTER_TYPE_DF;
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(devinfo, type));
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(devinfo, type),
                      num_components);
}

src_reg
vec4_visitor::get_nir_src(const nir_src &src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int32, num_components);
}

src_reg
vec4_visitor::get_nir_src_imm(const nir_src &src)
{
   assert(nir_src_num_components(src) == 1);
   assert(nir_src_bit_size(src) == 32);
   return nir_src_is_const(src) ? src_reg(brw_imm_d(nir_src_as_int(src))) :
                                  get_nir_src(src, 1);
}

src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);

   if (nir_src_is_const(*offset_src)) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(nir_src_as_uint(*offset_src) == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}

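/* Materialize a double-float immediate.  Gen8+ can encode a DF immediate
 * directly; Haswell builds it with the DIM instruction; Ivy Bridge has to
 * assemble the two 32-bit halves in a VGRF and swizzle-replicate the result.
 */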
static src_reg
setup_imm_df(const vec4_builder &bld, double v)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8)
      return brw_imm_df(v);

   /* gen7.5 does not support DF immediates straightforwardly but the DIM
    * instruction allows to set the 64-bit immediate value.
    */
   if (devinfo->is_haswell) {
      const vec4_builder ubld = bld.exec_all();
      const dst_reg dst = bld.vgrf(BRW_REGISTER_TYPE_DF);
      ubld.DIM(dst, brw_imm_df(v));
      return swizzle(src_reg(dst), BRW_SWIZZLE_XXXX);
   }

   /* gen7 does not support DF immediates */
   union {
      double d;
      struct {
         uint32_t i1;
         uint32_t i2;
      };
   } di;

   di.d = v;

   /* Write the low 32-bit of the constant to the X:UD channel and the
    * high 32-bit to the Y:UD channel to build the constant in a VGRF.
    * We have to do this twice (offset 0 and offset 1), since a DF VGRF takes
    * two SIMD8 registers in SIMD4x2 execution.  Finally, return a swizzle
    * XXXX so any access to the VGRF only reads the constant data in these
    * channels.
    */
   const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
   for (unsigned n = 0; n < 2; n++) {
      const vec4_builder ubld = bld.exec_all().group(4, n);
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_X), brw_imm_ud(di.i1));
      ubld.MOV(writemask(offset(tmp, 8, n), WRITEMASK_Y), brw_imm_ud(di.i2));
   }

   return swizzle(src_reg(retype(tmp, BRW_REGISTER_TYPE_DF)), BRW_SWIZZLE_XXXX);
}

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg;

   if (instr->def.bit_size == 64) {
      reg = dst_reg(VGRF, alloc.allocate(2));
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      reg = dst_reg(VGRF, alloc.allocate(1));
      reg.type = BRW_REGISTER_TYPE_D;
   }

   const vec4_builder ibld = vec4_builder(this).at_end();
   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if ((instr->def.bit_size == 32 &&
              instr->value[i].u32 == instr->value[j].u32) ||
             (instr->def.bit_size == 64 &&
              instr->value[i].f64 == instr->value[j].f64)) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      if (instr->def.bit_size == 64) {
         emit(MOV(reg, setup_imm_df(ibld, instr->value[i].f64)));
      } else {
         emit(MOV(reg, brw_imm_d(instr->value[i].i32)));
      }

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}

src_reg
vec4_visitor::get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr)
{
   /* SSBO stores are weird in that their index is in src[1] */
   const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;

   src_reg surf_index;
   if (nir_src_is_const(instr->src[src])) {
      unsigned index = prog_data->base.binding_table.ssbo_start +
                       nir_src_as_uint(instr->src[src]);
      surf_index = brw_imm_ud(index);
   } else {
      surf_index = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[src], 1),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
      surf_index = emit_uniformize(surf_index);
   }

   return surf_index;
}

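/* Translate a single NIR intrinsic.  Only the intrinsics that can occur in
 * the vec4 vertex pipeline are handled here; anything else falls through to
 * the unreachable() at the bottom of the switch.
 */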
void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {

   case nir_intrinsic_load_input: {
      assert(nir_dest_bit_size(instr->dest) == 32);
      /* We set EmitNoIndirectInput for VS */
      unsigned load_offset = nir_src_as_uint(instr->src[0]);

      dest = get_nir_dest(instr->dest);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      src = src_reg(ATTR, instr->const_index[0] + load_offset,
                    glsl_type::uvec4_type);
      src = retype(src, dest.type);

      /* Swizzle source based on component layout qualifier */
      src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output: {
      assert(nir_src_bit_size(instr->src[0]) == 32);
      unsigned store_offset = nir_src_as_uint(instr->src[1]);
      int varying = instr->const_index[0] + store_offset;
      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      unsigned c = nir_intrinsic_component(instr);
      output_reg[varying][c] = dst_reg(src);
      output_num_components[varying][c] = instr->num_components;
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      assert(nir_src_num_components(instr->src[0]) == 1);
      unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
                            nir_src_as_uint(instr->src[0]) : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);
      break;
   }

   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_src_bit_size(instr->src[0]) == 32);
      assert(nir_intrinsic_write_mask(instr) ==
             (1u << instr->num_components) - 1);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[2]),
                                  BRW_REGISTER_TYPE_UD);

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F, 4);

      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we have decided
       * that it is better to keep the symmetry to reduce complexity, so in
       * situations such as the one described we end up emitting two untyped write
       * messages (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require to write a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require to modify the current messages to receive
       *    and honor the writemask provided.
       */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                         1 /* dims */, instr->num_components /* size */,
                         BRW_PREDICATE_NONE);
      break;
   }

   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      /* brw_nir_lower_mem_access_bit_sizes takes care of this */
      assert(nir_dest_bit_size(instr->dest) == 32);

      src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
      src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
                                  BRW_REGISTER_TYPE_UD);

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
         .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size*/,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));
      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(brw_aop_for_nir_intrinsic(instr), instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
      unreachable("should be lowered by brw_nir_lower_vs_inputs()");

   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 4 */
      assert(nir_intrinsic_base(instr) % 4 == 0);

      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
      src.type = dest.type;

      /* Uniforms don't actually have to be vec4 aligned.  In the case that
       * it isn't, we have to use a swizzle to shift things around.  They
       * do still have the std140 alignment requirement that vec2's have to
       * be vec2-aligned and vec3's and vec4's have to be vec4-aligned.
       *
       * The swizzle also works in the indirect case as the generator adds
       * the swizzle to the offset for us.
       */
      const int type_size = type_sz(src.type);
      unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
      assert(shift + instr->num_components <= 4);

      if (nir_src_is_const(instr->src[0])) {
         const unsigned load_offset = nir_src_as_uint(instr->src[0]);
         /* Offsets are in bytes but they should always be multiples of 4 */
         assert(load_offset % 4 == 0);

         src.swizzle = brw_swizzle_for_size(instr->num_components);
         dest.writemask = brw_writemask_for_size(instr->num_components);
         unsigned offset = load_offset + shift * type_size;
         src.offset = ROUND_DOWN_TO(offset, 16);
         shift = (offset % 16) / type_size;
         assert(shift + instr->num_components <= 4);
         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);

         emit(MOV(dest, src));
      } else {
         /* Uniform arrays are vec4 aligned, because of std140 alignment
          * rules.
          */
         assert(shift == 0);

         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         /* MOV_INDIRECT is going to stomp the whole thing anyway */
         dest.writemask = WRITEMASK_XYZW;

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }

   case nir_intrinsic_load_ubo: {
      src_reg surf_index;

      prog_data->base.has_ubo_pull = true;

      dest = get_nir_dest(instr->dest);

      if (nir_src_is_const(instr->src[0])) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                nir_src_as_uint(instr->src[0]);
         surf_index = brw_imm_ud(index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int32,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);
      }

      src_reg offset_reg;
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         offset_reg = brw_imm_ud(load_offset & ~15);
      } else {
         offset_reg = src_reg(this, glsl_type::uint_type);
         emit(MOV(dst_reg(offset_reg),
                  get_nir_src(instr->src[1], nir_type_uint32, 1)));
      }

      src_reg packed_consts;
      if (nir_dest_bit_size(instr->dest) == 32) {
         packed_consts = src_reg(this, glsl_type::vec4_type);
         emit_pull_constant_load_reg(dst_reg(packed_consts),
                                     surf_index,
                                     offset_reg,
                                     NULL, NULL /* before_block/inst */);
      } else {
         src_reg temp = src_reg(this, glsl_type::dvec4_type);
         src_reg temp_float = retype(temp, BRW_REGISTER_TYPE_F);

         emit_pull_constant_load_reg(dst_reg(temp_float),
                                     surf_index, offset_reg, NULL, NULL);
         if (offset_reg.file == IMM)
            offset_reg.ud += 16;
         else
            emit(ADD(dst_reg(offset_reg), offset_reg, brw_imm_ud(16u)));
         emit_pull_constant_load_reg(dst_reg(byte_offset(temp_float, REG_SIZE)),
                                     surf_index, offset_reg, NULL, NULL);

         packed_consts = src_reg(this, glsl_type::dvec4_type);
         shuffle_64bit_data(dst_reg(packed_consts), temp, false);
      }

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (nir_src_is_const(instr->src[1])) {
         unsigned load_offset = nir_src_as_uint(instr->src[1]);
         unsigned type_size = type_sz(dest.type);
         packed_consts.swizzle +=
            BRW_SWIZZLE4(load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size,
                         load_offset % 16 / type_size);
      }

      emit(MOV(dest, retype(packed_consts, dest.type)));
      break;
   }

   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_scoped_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
      vec4_instruction *fence =
         bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp, brw_vec8_grf(0, 0));
      fence->sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface = get_nir_ssbo_intrinsic_index(instr);
   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1;
   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
      data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                               data1, data2,
                                               1 /* dims */, 1 /* rsize */,
                                               op,
                                               BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

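/* Pack a 4-component NIR swizzle array into the single-word BRW_SWIZZLE4
 * encoding used by vec4 source registers.
 */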
static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

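/* If the condition feeding a b32csel is an any()/all() style vector
 * comparison, emit the CMP directly here and report an ALIGN16 ANY4H/ALL4H
 * predicate for the SEL, instead of materializing the boolean result first.
 */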
bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);

   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      nir_alu_type type = nir_op_infos[cmp_instr->op].input_types[i];
      unsigned bit_size = nir_src_bit_size(cmp_instr->src[i].src);
      type = (nir_alu_type) (((unsigned) type) | bit_size);
      op[i] = get_nir_src(cmp_instr->src[i].src, type, 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_cmod_for_nir_comparison(cmp_instr->op)));

   return true;
}

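/* Implement GLSL findMSB() in terms of the LZD (leading-zero-detect)
 * instruction; used on platforms without FBH (gen < 7) and, via the
 * (x & -x) trick, as a helper for nir_op_find_lsb on those platforms.
 */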
static void
emit_find_msb_using_lzd(const vec4_builder &bld,
                        const dst_reg &dst,
                        const src_reg &src,
                        bool is_signed)
{
   vec4_instruction *inst;
   src_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *    For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));

      bld.ASR(dst_reg(temp), src, brw_imm_d(31));
      bld.XOR(dst_reg(temp), temp, src);
   }

   bld.LZD(retype(dst, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB count
    * into an LSB count.  If no bits are set, LZD will return 32.  31-32 = -1,
    * which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(dst, retype(src_reg(dst), BRW_REGISTER_TYPE_D),
                  brw_imm_d(31));
   inst->src[0].negate = true;
}

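/* DF -> 32-bit conversions in ALIGN16 cannot be expressed as a plain MOV:
 * the data goes through a VEC4_OPCODE_DOUBLE_TO_* temporary and is then
 * repacked with VEC4_OPCODE_PICK_LOW_32BIT.
 */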
void
vec4_visitor::emit_conversion_from_double(dst_reg dst, src_reg src)
{
   /* BDW PRM vol 15 - workarounds:
    * DF->f format conversion for Align16 has wrong emask calculation when
    * source is immediate.
    */
   if (devinfo->gen == 8 && dst.type == BRW_REGISTER_TYPE_F &&
       src.file == BRW_IMMEDIATE_VALUE) {
      emit(MOV(dst, brw_imm_f(src.df)));
      return;
   }

   enum opcode op;
   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      op = VEC4_OPCODE_DOUBLE_TO_D32;
      break;
   case BRW_REGISTER_TYPE_UD:
      op = VEC4_OPCODE_DOUBLE_TO_U32;
      break;
   case BRW_REGISTER_TYPE_F:
      op = VEC4_OPCODE_DOUBLE_TO_F32;
      break;
   default:
      unreachable("Unknown conversion");
   }

   dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
   emit(MOV(temp, src));
   dst_reg temp2 = dst_reg(this, glsl_type::dvec4_type);
   emit(op, temp2, src_reg(temp));

   emit(VEC4_OPCODE_PICK_LOW_32BIT, retype(temp2, dst.type), src_reg(temp2));
   emit(MOV(dst, src_reg(retype(temp2, dst.type))));
}

void
vec4_visitor::emit_conversion_to_double(dst_reg dst, src_reg src)
{
   dst_reg tmp_dst = dst_reg(src_reg(this, glsl_type::dvec4_type));
   src_reg tmp_src = retype(src_reg(this, glsl_type::vec4_type), src.type);
   emit(MOV(dst_reg(tmp_src), src));
   emit(VEC4_OPCODE_TO_DOUBLE, tmp_dst, tmp_src);
   emit(MOV(dst, src_reg(tmp_dst)));
}

/**
 * Try to use an immediate value for a source
 *
 * In cases of flow control, constant propagation is sometimes unable to
 * determine that a register contains a constant value.  To work around this,
 * try to emit a literal as one of the sources.  If \c try_src0_also is set,
 * \c op[0] will also be tried for an immediate value.
 *
 * If \c op[0] is modified, the operands will be exchanged so that \c op[1]
 * will always be the immediate value.
 *
 * \return The index of the source that was modified, 0 or 1, if successful.
 * Otherwise -1.
 *
 * \param op - Operands to the instruction
 * \param try_src0_also - True if \c op[0] should also be a candidate for
 *                        getting an immediate value.  This should only be set
 *                        for commutative operations.
 */
static int
try_immediate_source(const nir_alu_instr *instr, src_reg *op,
                     bool try_src0_also,
                     ASSERTED const gen_device_info *devinfo)
{
   unsigned idx;

   /* MOV should be the only single-source instruction passed to this
    * function.  Any other unary instruction with a constant source should
    * have been constant-folded away!
    */
   assert(nir_op_infos[instr->op].num_inputs > 1 ||
          instr->op == nir_op_mov);

   if (instr->op != nir_op_mov &&
       nir_src_bit_size(instr->src[1].src) == 32 &&
       nir_src_is_const(instr->src[1].src)) {
      idx = 1;
   } else if (try_src0_also &&
              nir_src_bit_size(instr->src[0].src) == 32 &&
              nir_src_is_const(instr->src[0].src)) {
      idx = 0;
   } else {
      return -1;
   }

   const enum brw_reg_type old_type = op[idx].type;

   switch (old_type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD: {
      int first_comp = -1;
      int d = 0;

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            if (first_comp < 0) {
               first_comp = i;
               d = nir_src_comp_as_int(instr->src[idx].src,
                                       instr->src[idx].swizzle[i]);
            } else if (d != nir_src_comp_as_int(instr->src[idx].src,
                                                instr->src[idx].swizzle[i])) {
               return -1;
            }
         }
      }

      assert(first_comp >= 0);

      if (op[idx].abs)
         d = MAX2(-d, d);

      if (op[idx].negate) {
         /* On Gen8+ a negation source modifier on a logical operation means
          * something different.  Nothing should generate this, so assert that
          * it does not occur.
          */
         assert(devinfo->gen < 8 || (instr->op != nir_op_iand &&
                                     instr->op != nir_op_ior &&
                                     instr->op != nir_op_ixor));
         d = -d;
      }

      op[idx] = retype(src_reg(brw_imm_d(d)), old_type);
      break;
   }

   case BRW_REGISTER_TYPE_F: {
      int first_comp = -1;
      float f[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
      bool is_scalar = true;

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(instr, idx, i)) {
            f[i] = nir_src_comp_as_float(instr->src[idx].src,
                                         instr->src[idx].swizzle[i]);
            if (first_comp < 0) {
               first_comp = i;
            } else if (f[first_comp] != f[i]) {
               is_scalar = false;
            }
         }
      }

      if (is_scalar) {
         if (op[idx].abs)
            f[first_comp] = fabs(f[first_comp]);

         if (op[idx].negate)
            f[first_comp] = -f[first_comp];

         op[idx] = src_reg(brw_imm_f(f[first_comp]));
         assert(op[idx].type == old_type);
      } else {
         uint8_t vf_values[4] = { 0, 0, 0, 0 };

         for (unsigned i = 0; i < ARRAY_SIZE(vf_values); i++) {
            if (op[idx].abs)
               f[i] = fabs(f[i]);

            if (op[idx].negate)
               f[i] = -f[i];

            const int vf = brw_float_to_vf(f[i]);
            if (vf == -1)
               return -1;

            vf_values[i] = vf;
         }

         op[idx] = src_reg(brw_imm_vf4(vf_values[0], vf_values[1],
                                       vf_values[2], vf_values[3]));
      }
      break;
   }

   default:
      unreachable("Non-32bit type.");
   }

   /* If the instruction has more than one source, the instruction format only
    * allows source 1 to be an immediate value.  If the immediate value was
    * source 0, then the sources must be exchanged.
    */
   if (idx == 0 && instr->op != nir_op_mov) {
      src_reg tmp = op[0];
      op[0] = op[1];
      op[1] = tmp;
   }

   return idx;
}

void
vec4_visitor::fix_float_operands(src_reg op[3], nir_alu_instr *instr)
{
   bool fixed[3] = { false, false, false };

   for (unsigned i = 0; i < 2; i++) {
      if (!nir_src_is_const(instr->src[i].src))
         continue;

      for (unsigned j = i + 1; j < 3; j++) {
         if (fixed[j])
            continue;

         if (!nir_src_is_const(instr->src[j].src))
            continue;

         if (nir_alu_srcs_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];

            fixed[i] = true;
            fixed[j] = true;
         } else if (nir_alu_srcs_negative_equal(instr, instr, i, j)) {
            if (!fixed[i])
               op[i] = fix_3src_operand(op[i]);

            op[j] = op[i];
            op[j].negate = !op[j].negate;

            fixed[i] = true;
            fixed[j] = true;
         }
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (!fixed[i])
         op[i] = fix_3src_operand(op[i]);
   }
}

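/* Check whether the scalar constant in a NIR source fits in 16 bits.
 * nir_emit_alu() uses this for nir_op_imul on gen < 8: MUL only reads the
 * low 16 bits of one of its operands, so a small enough constant allows a
 * single MUL instead of the full MUL/MACH/MOV sequence.
 */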
static bool
const_src_fits_in_16_bits(const nir_src &src, brw_reg_type type)
{
   assert(nir_src_is_const(src));
   if (type_is_unsigned_int(type)) {
      return nir_src_comp_as_uint(src, 0) <= UINT16_MAX;
   } else {
      const int64_t c = nir_src_comp_as_int(src, 0);
      return c <= INT16_MAX && c >= INT16_MIN;
   }
}

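/* Translate a single NIR ALU instruction.  Sources are fetched as full vec4
 * registers with the NIR swizzle folded into the BRW swizzle, and the
 * destination writemask is taken from instr->dest.write_mask.
 */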
void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
                                           nir_dest_bit_size(instr->dest.dest));
   dst_reg dst = get_nir_dest(instr->dest.dest, dst_type);
   dst.writemask = instr->dest.write_mask;

   assert(!instr->dest.saturate);

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* We don't lower to source modifiers, so they shouldn't exist. */
      assert(!instr->src[i].abs);
      assert(!instr->src[i].negate);

      nir_alu_type src_type = (nir_alu_type)
         (nir_op_infos[instr->op].input_types[i] |
          nir_src_bit_size(instr->src[i].src));
      op[i] = get_nir_src(instr->src[i].src, src_type, 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
   }

   switch (instr->op) {
   case nir_op_mov:
      try_immediate_source(instr, &op[0], true, devinfo);
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2i32:
   case nir_op_u2u32:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
      if (nir_src_bit_size(instr->src[0].src) == 64)
         emit_conversion_from_double(dst, op[0]);
      else
         inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_f2f64:
   case nir_op_i2f64:
   case nir_op_u2f64:
      emit_conversion_to_double(dst, op[0]);
      break;

   case nir_op_fsat:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = true;
      break;

   case nir_op_fneg:
   case nir_op_ineg:
      op[0].negate = true;
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fabs:
   case nir_op_iabs:
      op[0].negate = false;
      op[0].abs = true;
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_iadd:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fadd:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(ADD(dst, op[0], op[1]));
      break;

   case nir_op_uadd_sat:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = true;
      break;

   case nir_op_fmul:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(MUL(dst, op[0], op[1]));
      break;

   case nir_op_imul: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen < 8) {
         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later).  The MACH
          * accumulates in the contribution of the upper 16 bits of that
          * operand.  If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
         if (nir_src_is_const(instr->src[0].src) &&
             nir_alu_instr_src_read_mask(instr, 0) == 1 &&
             const_src_fits_in_16_bits(instr->src[0].src, op[0].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (nir_src_is_const(instr->src[1].src) &&
                    nir_alu_instr_src_read_mask(instr, 1) == 1 &&
                    const_src_fits_in_16_bits(instr->src[1].src, op[1].type)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      break;

   case nir_op_fsin:
      inst = emit_math(SHADER_OPCODE_SIN, dst, op[0]);
      break;

   case nir_op_fcos:
      inst = emit_math(SHADER_OPCODE_COS, dst, op[0]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      break;

   case nir_op_uadd_carry: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      if (devinfo->gen < 6) {
         inst->conditional_mod = BRW_CONDITIONAL_R;
         inst = emit(ADD(dst, src_reg(dst), brw_imm_f(1.0f)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst = emit(MOV(dst, src_reg(dst))); /* for potential saturation */
      }
      break;

   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_imin:
   case nir_op_umin:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmin:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      break;

   case nir_op_imax:
   case nir_op_umax:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* fall through */
   case nir_op_fmax:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fne32: {
      enum brw_conditional_mod conditional_mod =
         brw_cmod_for_nir_comparison(instr->op);

      if (nir_src_bit_size(instr->src[0].src) < 64) {
         /* If the order of the sources is changed due to an immediate value,
          * then the condition must also be changed.
          */
         if (try_immediate_source(instr, op, true, devinfo) == 0)
            conditional_mod = brw_swap_cmod(conditional_mod);

         emit(CMP(dst, op[0], op[1], conditional_mod));
      } else {
         /* Produce a 32-bit boolean result from the DF comparison by selecting
          * only the low 32-bit in each DF produced.  Do this in a temporary
          * so we can then move from there to the result using align16 again
          * to honor the original writemask.
          */
         dst_reg temp = dst_reg(this, glsl_type::dvec4_type);
         emit(CMP(temp, op[0], op[1], conditional_mod));
         dst_reg result = dst_reg(this, glsl_type::bvec4_type);
         emit(VEC4_OPCODE_PICK_LOW_32BIT, result, src_reg(temp));
         emit(MOV(dst, src_reg(result)));
      }
      break;
   }

   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      /* Fallthrough */
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_cmod_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      try_immediate_source(instr, op, true, devinfo);
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i32:
   case nir_op_b2f32:
   case nir_op_b2f64:
      if (nir_dest_bit_size(instr->dest.dest) > 32) {
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         emit_conversion_to_double(dst, negate(op[0]));
      } else {
         emit(MOV(dst, negate(op[0])));
      }
      break;

   case nir_op_f2b32:
      if (nir_src_bit_size(instr->src[0].src) == 64) {
         /* We use a MOV with conditional_mod to check if the provided value is
          * 0.0.  We want this to flush denormalized numbers to zero, so we set a
          * source modifier on the source operand to trigger this, as source
          * modifiers don't affect the result of the testing against 0.0.
          */
         src_reg value = op[0];
         value.abs = true;
         vec4_instruction *inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         src_reg one = src_reg(this, glsl_type::ivec4_type);
         emit(MOV(dst_reg(one), brw_imm_d(~0)));
         inst = emit(BRW_OPCODE_SEL, dst, one, brw_imm_d(0));
         inst->predicate = BRW_PREDICATE_NORMAL;
      } else {
         emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      }
      break;

   case nir_op_i2b32:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }

   case nir_op_pack_64_2x32_split: {
      dst_reg result = dst_reg(this, glsl_type::dvec4_type);
      dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
      emit(MOV(tmp, retype(op[0], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_LOW_32BIT, result, src_reg(tmp));
      emit(MOV(tmp, retype(op[1], BRW_REGISTER_TYPE_UD)));
      emit(VEC4_OPCODE_SET_HIGH_32BIT, result, src_reg(tmp));
      emit(MOV(dst, src_reg(result)));
      break;
   }

   case nir_op_unpack_64_2x32_split_x:
   case nir_op_unpack_64_2x32_split_y: {
      enum opcode oper = (instr->op == nir_op_unpack_64_2x32_split_x) ?
         VEC4_OPCODE_PICK_LOW_32BIT : VEC4_OPCODE_PICK_HIGH_32BIT;
      dst_reg tmp = dst_reg(this, glsl_type::dvec4_type);
      emit(MOV(tmp, op[0]));
      dst_reg tmp2 = dst_reg(this, glsl_type::uvec4_type);
      emit(oper, tmp2, src_reg(tmp));
      emit(MOV(dst, src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* As NIR does not guarantee that we have a correct swizzle outside the
       * boundaries of a vector, and the implementation of emit_unpack_half_2x16
       * uses the source operand in an operation with WRITEMASK_Y while our
       * source operand has only size 1, it accessed incorrect data producing
       * regressions in Piglit.  We repeat the swizzle of the first component on
       * the rest of components to avoid regressions.  In the vec4_visitor IR
       * code path this is not needed because the operand already has the
       * correct swizzle.
       */
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_find_msb_using_lzd(vec4_builder(this).at_end(), dst, op[0], false);
      break;

   case nir_op_ifind_msb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      vec4_builder bld = vec4_builder(this).at_end();
      src_reg src(dst);

      if (devinfo->gen < 7) {
         emit_find_msb_using_lzd(bld, dst, op[0], true);
      } else {
         emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

         /* FBH counts from the MSB side, while GLSL's findMSB() wants the
          * count from the LSB side.  If FBH didn't return an error
          * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
          * count into an LSB count.
          */
         bld.CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ);

         inst = bld.ADD(dst, src, brw_imm_d(31));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->src[0].negate = true;
      }
      break;
   }

   case nir_op_find_lsb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      vec4_builder bld = vec4_builder(this).at_end();

      if (devinfo->gen < 7) {
         dst_reg temp = bld.vgrf(BRW_REGISTER_TYPE_D);

         /* (x & -x) generates a value that consists of only the LSB of x.
          * For all powers of 2, findMSB(y) == findLSB(y).
          */
         src_reg src = src_reg(retype(op[0], BRW_REGISTER_TYPE_D));
         src_reg negated_src = src;

         /* One must be negated, and the other must be non-negated.  It
          * doesn't matter which is which.
          */
         negated_src.negate = true;

         bld.AND(temp, src, negated_src);
         emit_find_msb_using_lzd(bld, dst, src_reg(temp), false);
      } else {
         bld.FBL(dst, op[0]);
      }
      break;
   }

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_fsign:
      if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
          * zero.
          */
         emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

         op[0].type = BRW_REGISTER_TYPE_UD;
         dst.type = BRW_REGISTER_TYPE_UD;
         emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

         inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;
         dst.type = BRW_REGISTER_TYPE_F;
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - We use a MOV with conditional_mod instead of a CMP so that we can
          *   skip loading a 0.0 immediate.  We use a source modifier on the
          *   source of the MOV so that we flush denormalized values to 0.
          *   Since we want to compare against 0, this won't alter the result.
          * - We need to extract the high 32-bit of each DF where the sign
          *   is stored.
          * - We need to produce a DF result.
          */

         /* Check for zero */
         src_reg value = op[0];
         value.abs = true;
         inst = emit(MOV(dst_null_df(), value));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;

         /* AND each high 32-bit channel with 0x80000000u */
         dst_reg tmp = dst_reg(this, glsl_type::uvec4_type);
         emit(VEC4_OPCODE_PICK_HIGH_32BIT, tmp, op[0]);
         emit(AND(tmp, src_reg(tmp), brw_imm_ud(0x80000000u)));

         /* Add 1.0 to each channel, predicated to skip the cases where the
          * channel's value was 0
          */
         inst = emit(OR(tmp, src_reg(tmp), brw_imm_ud(0x3f800000u)));
         inst->predicate = BRW_PREDICATE_NORMAL;

         /* Now convert the result from float to double */
         emit_conversion_to_double(dst, retype(src_reg(tmp),
                                               BRW_REGISTER_TYPE_F));
      }
      break;

   case nir_op_ishl:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      try_immediate_source(instr, op, false, devinfo);
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      if (type_sz(dst.type) == 8) {
         dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
         emit(MUL(mul_dst, op[1], op[0]));
         inst = emit(ADD(dst, src_reg(mul_dst), op[2]));
      } else {
         fix_float_operands(op, instr);
         inst = emit(MAD(dst, op[2], op[1], op[0]));
      }
      break;

   case nir_op_flrp:
      fix_float_operands(op, instr);
      inst = emit(LRP(dst, op[2], op[1], op[0]));
      break;

   case nir_op_b32csel:
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;

   case nir_op_fdot_replicated2:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      break;

   case nir_op_fdot_replicated3:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      break;

   case nir_op_fdot_replicated4:
      try_immediate_source(instr, op, true, devinfo);
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      break;

   case nir_op_fdph_replicated:
      try_immediate_source(instr, op, false, devinfo);
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      break;

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
      /* fall through */
   default:
      unreachable("unknown jump");
   }
}

static enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

static const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   return glsl_type::get_instance(brw_glsl_base_type_for_nir_type(alu_type),
                                  components, 1);
}

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparator;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         shadow_comparator = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset:
         if (!brw_texture_offset(instr, i, &constant_offset)) {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;

      case nir_tex_src_texture_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }

   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparator,
                lod, lod2, sample_index,
                constant_offset, offset_value, mcs,
                texture, texture_reg, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] =
      dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
}

/* SIMD4x2 64bit data is stored in register space like this:
 *
 * r0.0:DF  x0 y0 z0 w0
 * r1.0:DF  x1 y1 z1 w1
 *
 * When we need to write data such as this to memory using 32-bit write
 * messages we need to shuffle it in this fashion:
 *
 * r0.0:DF  x0 y0 x1 y1 (to be written at base offset)
 * r1.0:DF  z0 w0 z1 w1 (to be written at base offset + 16)
 *
 * We need to do the inverse operation when we read using 32-bit messages,
 * which we can do by applying the same exact shuffling on the 64-bit data
 * read, only that because the data for each vertex is positioned differently
 * we need to apply different channel enables.
 *
 * This function takes 64-bit data and shuffles it as explained above.
 *
 * The @for_write parameter is used to specify if the shuffling is being done
 * for proper SIMD4x2 64-bit data that needs to be shuffled prior to a 32-bit
 * write message (for_write = true), or instead we are doing the inverse
 * operation and we have just read 64-bit data using 32-bit messages that we
 * need to shuffle to create valid SIMD4x2 64-bit data (for_write = false).
 *
 * If @block and @ref are non-NULL, then the shuffling is done after @ref,
 * otherwise the instructions are emitted normally at the end.  The function
 * returns the last instruction inserted.
 *
 * Notice that @src and @dst cannot be the same register.
 */
vec4_instruction *
vec4_visitor::shuffle_64bit_data(dst_reg dst, src_reg src, bool for_write,
                                 bblock_t *block, vec4_instruction *ref)
{
   assert(type_sz(src.type) == 8);
   assert(type_sz(dst.type) == 8);
   assert(!regions_overlap(dst, 2 * REG_SIZE, src, 2 * REG_SIZE));
   assert(!ref == !block);

   const vec4_builder bld = !ref ? vec4_builder(this).at_end() :
                                   vec4_builder(this).at(block, ref->next);

   /* Resolve swizzle in src */
   vec4_instruction *inst;
   if (src.swizzle != BRW_SWIZZLE_XYZW) {
      dst_reg data = dst_reg(this, glsl_type::dvec4_type);
      inst = bld.MOV(data, src);
      src = src_reg(data);
   }

   /* dst+0.XY = src+0.XY */
   inst = bld.group(4, 0).MOV(writemask(dst, WRITEMASK_XY), src);

   /* dst+0.ZW = src+1.XY */
   inst = bld.group(4, for_write ? 1 : 0)
             .MOV(writemask(dst, WRITEMASK_ZW),
                  swizzle(byte_offset(src, REG_SIZE), BRW_SWIZZLE_XYXY));

   /* dst+1.XY = src+0.ZW */
   inst = bld.group(4, for_write ? 0 : 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_XY),
                  swizzle(src, BRW_SWIZZLE_ZWZW));

   /* dst+1.ZW = src+1.ZW */
   inst = bld.group(4, 1)
             .MOV(writemask(byte_offset(dst, REG_SIZE), WRITEMASK_ZW),
                  byte_offset(src, REG_SIZE));

   return inst;
}

} /* namespace brw */