/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H

#include "nir_control_flow.h"
#include "util/bitscan.h"
#include "util/half_float.h"
struct exec_list;

typedef struct nir_builder {
   nir_cursor cursor;

   /* Whether new ALU instructions will be marked "exact" */
   bool exact;

   nir_shader *shader;
   nir_function_impl *impl;
} nir_builder;

static inline void
nir_builder_init(nir_builder *build, nir_function_impl *impl)
{
   memset(build, 0, sizeof(*build));
   build->exact = false;
   build->impl = impl;
   build->shader = impl->function->shader;
}
static inline void
nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
                               gl_shader_stage stage,
                               const nir_shader_compiler_options *options)
{
   memset(build, 0, sizeof(*build));
   build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
   nir_function *func = nir_function_create(build->shader, "main");
   func->is_entrypoint = true;
   build->exact = false;
   build->impl = nir_function_impl_create(func);
   build->cursor = nir_after_cf_list(&build->impl->body);
}
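
/* Usage sketch (editor's illustration, not part of the upstream header):
 * creating a standalone shader and emitting an instruction through the
 * builder.  `options` is assumed to come from the driver.
 *
 *    nir_builder b;
 *    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, options);
 *    nir_ssa_def *one = nir_imm_int(&b, 1);
 *    ...
 *    ralloc_free(b.shader);
 */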
static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}

static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
{
   assert(build->cursor.option == nir_cursor_after_instr);
   return build->cursor.instr;
}

static inline void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}
static inline bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}
static inline nir_if *
nir_push_if(nir_builder *build, nir_ssa_def *condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = nir_src_for_ssa(condition);
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}

static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}

static inline void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&nif->cf_node);
}
static inline nir_ssa_def *
nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);

   nir_phi_src *src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_then_block(nif);
   src->src = nir_src_for_ssa(then_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_else_block(nif);
   src->src = nir_src_for_ssa(else_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_ssa_dest_init(&phi->instr, &phi->dest,
                     then_def->num_components, then_def->bit_size, NULL);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->dest.ssa;
}
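
/* Usage sketch (editor's illustration): an if/else whose results are merged
 * with nir_if_phi().  nir_pop_if() leaves the cursor after the nir_if, which
 * is where the phi gets inserted.
 *
 *    nir_push_if(&b, nir_ilt(&b, x, nir_imm_int(&b, 0)));
 *    nir_ssa_def *t = nir_imm_float(&b, 1.0f);
 *    nir_push_else(&b, NULL);
 *    nir_ssa_def *e = nir_imm_float(&b, 0.0f);
 *    nir_pop_if(&b, NULL);
 *    nir_ssa_def *result = nir_if_phi(&b, t, e);
 */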
static inline nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}

static inline void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&loop->cf_node);
}
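
/* Usage sketch (editor's illustration): NIR loops are infinite until broken
 * out of, so the usual pattern is a conditional break at the top.  `done` is
 * a placeholder for some boolean SSA value; nir_jump() is defined further
 * down in this header.
 *
 *    nir_loop *loop = nir_push_loop(&b);
 *    {
 *       nir_push_if(&b, done);
 *       nir_jump(&b, nir_jump_break);
 *       nir_pop_if(&b, NULL);
 *       ...
 *    }
 *    nir_pop_loop(&b, loop);
 */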
static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
   if (!undef)
      return NULL;

   nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);

   return &undef->def;
}
static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
              unsigned bit_size, const nir_const_value *value)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);
   if (!load_const)
      return NULL;

   memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);

   /* nir_load_const_instr_create uses rzalloc so it's already zero */

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}
static inline nir_ssa_def *
nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_bool(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_bool(nir_builder *build, bool x)
{
   return nir_imm_boolN_t(build, x, 1);
}

static inline nir_ssa_def *
nir_imm_true(nir_builder *build)
{
   return nir_imm_bool(build, true);
}

static inline nir_ssa_def *
nir_imm_false(nir_builder *build)
{
   return nir_imm_bool(build, false);
}

static inline nir_ssa_def *
nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_float(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_float16(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 16);
}

static inline nir_ssa_def *
nir_imm_float(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_double(nir_builder *build, double x)
{
   return nir_imm_floatN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
   nir_const_value v[2] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
   };
   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
      nir_const_value_for_float(z, 32),
      nir_const_value_for_float(w, 32),
   };
   return nir_build_imm(build, 4, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 16),
      nir_const_value_for_float(y, 16),
      nir_const_value_for_float(z, 16),
      nir_const_value_for_float(w, 16),
   };
   return nir_build_imm(build, 4, 16, v);
}

static inline nir_ssa_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_int(nir_builder *build, int x)
{
   return nir_imm_intN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
   return nir_imm_intN_t(build, x, 64);
}

static inline nir_ssa_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
   nir_const_value v[2] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
   };
   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
   nir_const_value v[4] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
      nir_const_value_for_int(z, 32),
      nir_const_value_for_int(w, 32),
   };
   return nir_build_imm(build, 4, 32, v);
}
static inline nir_ssa_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
{
   const nir_op_info *op_info = &nir_op_infos[instr->op];

   instr->exact = build->exact;

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
      if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
         if (bit_size)
            assert(src_bit_size == bit_size);
         else
            bit_size = src_bit_size;
      } else {
         assert(src_bit_size ==
                nir_alu_type_get_type_size(op_info->input_types[i]));
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components;
           j < NIR_MAX_VEC_COMPONENTS; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
                     bit_size, NULL);
   instr->dest.write_mask = (1 << num_components) - 1;

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->dest.dest.ssa;
}
static inline nir_ssa_def *
nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

/* for the couple special cases with more than 4 src args: */
static inline nir_ssa_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < op_info->num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(srcs[i]);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}

#include "nir_builder_opcodes.h"

static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
{
   return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
}
static inline nir_ssa_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   assert(!src.abs && !src.negate);
   if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
      bool any_swizzles = false;
      for (unsigned i = 0; i < num_components; i++) {
         if (src.swizzle[i] != i)
            any_swizzles = true;
      }
      if (!any_swizzles)
         return src.src.ssa;
   }

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}
/**
 * Construct a mov that reswizzles the source's components.
 */
static inline nir_ssa_def *
nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
            unsigned num_components)
{
   assert(num_components <= NIR_MAX_VEC_COMPONENTS);
   nir_alu_src alu_src = { NIR_SRC_INIT };
   alu_src.src = nir_src_for_ssa(src);

   bool is_identity_swizzle = true;
   for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
      if (swiz[i] != i)
         is_identity_swizzle = false;
      alu_src.swizzle[i] = swiz[i];
   }

   if (num_components == src->num_components && is_identity_swizzle)
      return src;

   return nir_mov_alu(build, alu_src, num_components);
}
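
/* Usage sketch (editor's illustration): broadcasting component 0 of a vector
 * across three components, i.e. a ".xxx" swizzle in GLSL terms:
 *
 *    static const unsigned xxx[] = { 0, 0, 0 };
 *    nir_ssa_def *splat = nir_swizzle(&b, src, xxx, 3);
 */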
/* Selects the right fdot given the number of components in each source. */
static inline nir_ssa_def *
nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   assert(src0->num_components == src1->num_components);
   switch (src0->num_components) {
   case 1: return nir_fmul(build, src0, src1);
   case 2: return nir_fdot2(build, src0, src1);
   case 3: return nir_fdot3(build, src0, src1);
   case 4: return nir_fdot4(build, src0, src1);
   case 8: return nir_fdot8(build, src0, src1);
   case 16: return nir_fdot16(build, src0, src1);
   default:
      unreachable("bad component size");
   }

   return NULL;
}

static inline nir_ssa_def *
nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ieq(b, src0, src1);
   case 2: return nir_ball_iequal2(b, src0, src1);
   case 3: return nir_ball_iequal3(b, src0, src1);
   case 4: return nir_ball_iequal4(b, src0, src1);
   case 8: return nir_ball_iequal8(b, src0, src1);
   case 16: return nir_ball_iequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_ball(nir_builder *b, nir_ssa_def *src)
{
   return nir_ball_iequal(b, src, nir_imm_true(b));
}

static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ine(b, src0, src1);
   case 2: return nir_bany_inequal2(b, src0, src1);
   case 3: return nir_bany_inequal3(b, src0, src1);
   case 4: return nir_bany_inequal4(b, src0, src1);
   case 8: return nir_bany_inequal8(b, src0, src1);
   case 16: return nir_bany_inequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_bany(nir_builder *b, nir_ssa_def *src)
{
   return nir_bany_inequal(b, src, nir_imm_false(b));
}

static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
   return nir_swizzle(b, def, &c, 1);
}
static inline nir_ssa_def *
nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
{
   unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      if ((mask & (1 << i)) == 0)
         continue;
      swizzle[num_channels++] = i;
   }

   return nir_swizzle(b, def, swizzle, num_channels);
}
static inline nir_ssa_def *
_nir_vector_extract_helper(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c,
                           unsigned start, unsigned end)
{
   if (start == end - 1) {
      return nir_channel(b, vec, start);
   } else {
      unsigned mid = start + (end - start) / 2;
      return nir_bcsel(b, nir_ilt(b, c, nir_imm_intN_t(b, mid, c->bit_size)),
                       _nir_vector_extract_helper(b, vec, c, start, mid),
                       _nir_vector_extract_helper(b, vec, c, mid, end));
   }
}

static inline nir_ssa_def *
nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
{
   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_channel(b, vec, c_const);
      else
         return nir_ssa_undef(b, 1, vec->bit_size);
   } else {
      return _nir_vector_extract_helper(b, vec, c, 0, vec->num_components);
   }
}
/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
                      nir_ssa_def *scalar, unsigned c)
{
   assert(scalar->num_components == 1);
   assert(c < vec->num_components);

   nir_op vec_op = nir_op_vec(vec->num_components);
   nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);

   for (unsigned i = 0; i < vec->num_components; i++) {
      if (i == c) {
         vec_instr->src[i].src = nir_src_for_ssa(scalar);
         vec_instr->src[i].swizzle[0] = 0;
      } else {
         vec_instr->src[i].src = nir_src_for_ssa(vec);
         vec_instr->src[i].swizzle[0] = i;
      }
   }

   return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
}

/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
                  nir_ssa_def *c)
{
   assert(scalar->num_components == 1);
   assert(c->num_components == 1);

   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_vector_insert_imm(b, vec, scalar, c_const);
      else
         return vec;
   } else {
      nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
      nir_ssa_def *per_comp_idx =
         nir_build_imm(b, vec->num_components,
                       c->bit_size, per_comp_idx_const);

      /* nir_builder will automatically splat out scalars to vectors so an
       * insert is as simple as "if I'm the channel, replace me with the
       * scalar."
       */
      return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
   }
}
static inline nir_ssa_def *
nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_i2i64(build, x);
   case 32: return nir_i2i32(build, x);
   case 16: return nir_i2i16(build, x);
   case 8:  return nir_i2i8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_u2u64(build, x);
   case 32: return nir_u2u32(build, x);
   case 16: return nir_u2u16(build, x);
   case 8:  return nir_u2u8(build, x);
   default: unreachable("Invalid bit size");
   }
}
static inline nir_ssa_def *
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   if (x->bit_size < 64)
      y &= (1ull << x->bit_size) - 1;

   if (y == 0) {
      return x;
   } else {
      return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
   assert(x->bit_size <= 64);
   if (x->bit_size < 64)
      y &= (1ull << x->bit_size) - 1;

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == 1) {
      return x;
   } else if (!build->shader->options->lower_bitops &&
              util_is_power_of_two_or_zero64(y)) {
      return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
   } else if (amul) {
      return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   } else {
      return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}
static inline nir_ssa_def *
nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, false);
}

static inline nir_ssa_def *
nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, true);
}

static inline nir_ssa_def *
nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components * src->bit_size == dest_bit_size);

   switch (dest_bit_size) {
   case 64:
      switch (src->bit_size) {
      case 32: return nir_pack_64_2x32(b, src);
      case 16: return nir_pack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (src->bit_size == 16)
         return nir_pack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated pack opcode. */
   nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
      val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
      dest = nir_ior(b, dest, val);
   }
   return dest;
}

static inline nir_ssa_def *
nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components == 1);
   assert(src->bit_size > dest_bit_size);
   const unsigned dest_num_components = src->bit_size / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   switch (src->bit_size) {
   case 64:
      switch (dest_bit_size) {
      case 32: return nir_unpack_64_2x32(b, src);
      case 16: return nir_unpack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (dest_bit_size == 16)
         return nir_unpack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated unpack opcode. */
   nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < dest_num_components; i++) {
      nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
      dest_comps[i] = nir_u2u(b, val, dest_bit_size);
   }
   return nir_vec(b, dest_comps, dest_num_components);
}
/**
 * Treats srcs as if it's one big blob of bits and extracts the range of bits
 * given by
 *
 *       [first_bit, first_bit + dest_num_components * dest_bit_size)
 *
 * The range can have any alignment or size as long as it's an integer number
 * of destination components and fits inside the concatenated sources.
 *
 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
 * values are involved because that would require pack/unpack to/from a vec8
 * which NIR currently does not support.
 */
static inline nir_ssa_def *
nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
                 unsigned first_bit,
                 unsigned dest_num_components, unsigned dest_bit_size)
{
   const unsigned num_bits = dest_num_components * dest_bit_size;

   /* Figure out the common bit size */
   unsigned common_bit_size = dest_bit_size;
   for (unsigned i = 0; i < num_srcs; i++)
      common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
   if (first_bit > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));

   /* We don't want to have to deal with 1-bit values */
   assert(common_bit_size >= 8);

   nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
   assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));

   /* First, unpack to the common bit size and select the components from the
    * source.
    */
   int src_idx = -1;
   unsigned src_start_bit = 0;
   unsigned src_end_bit = 0;
   for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
      const unsigned bit = first_bit + (i * common_bit_size);
      while (bit >= src_end_bit) {
         src_idx++;
         assert(src_idx < (int) num_srcs);
         src_start_bit = src_end_bit;
         src_end_bit += srcs[src_idx]->bit_size *
                        srcs[src_idx]->num_components;
      }
      assert(bit >= src_start_bit);
      assert(bit + common_bit_size <= src_end_bit);
      const unsigned rel_bit = bit - src_start_bit;
      const unsigned src_bit_size = srcs[src_idx]->bit_size;

      nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
                                      rel_bit / src_bit_size);
      if (srcs[src_idx]->bit_size > common_bit_size) {
         nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
         comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
                                         common_bit_size);
      }
      common_comps[i] = comp;
   }

   /* Now, re-pack the destination if we have to */
   if (dest_bit_size > common_bit_size) {
      unsigned common_per_dest = dest_bit_size / common_bit_size;
      nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < dest_num_components; i++) {
         nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
                                         common_per_dest);
         dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
      }
      return nir_vec(b, dest_comps, dest_num_components);
   } else {
      assert(dest_bit_size == common_bit_size);
      return nir_vec(b, common_comps, dest_num_components);
   }
}
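
/* Usage sketch (editor's illustration): pulling one 32-bit value out of a
 * 16-bit vec4 (64 bits total), starting at bit 16, i.e. components 1 and 2
 * repacked into a single 32-bit scalar:
 *
 *    nir_ssa_def *mid = nir_extract_bits(&b, &src, 1, 16, 1, 32);
 */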
static inline nir_ssa_def *
nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert((src->bit_size * src->num_components) % dest_bit_size == 0);
   const unsigned dest_num_components =
      (src->bit_size * src->num_components) / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
}
/**
 * Turns a nir_src into a nir_ssa_def * so it can be passed to
 * nir_build_alu()-based builder calls.
 *
 * See nir_ssa_for_alu_src() for alu instructions.
 */
static inline nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
   if (src.is_ssa && src.ssa->num_components == num_components)
      return src.ssa;

   nir_alu_src alu = { NIR_SRC_INIT };
   alu.src = src;
   for (int j = 0; j < 4; j++)
      alu.swizzle[j] = j;

   return nir_mov_alu(build, alu, num_components);
}

/**
 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
 * nir_alu_src's swizzle.
 */
static inline nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   static uint8_t trivial_swizzle[] =
      { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
   STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);

   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);

   if (src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
       !src->abs && !src->negate &&
       (memcmp(src->swizzle, trivial_swizzle, num_components) == 0))
      return src->src.ssa;

   return nir_mov_alu(build, *src, num_components);
}
static inline unsigned
nir_get_ptr_bitsize(nir_builder *build)
{
   if (build->shader->info.stage == MESA_SHADER_KERNEL)
      return build->shader->info.cs.ptr_size;
   return 32;
}
*
968 nir_build_deref_var(nir_builder
*build
, nir_variable
*var
)
970 nir_deref_instr
*deref
=
971 nir_deref_instr_create(build
->shader
, nir_deref_type_var
);
973 deref
->mode
= var
->data
.mode
;
974 deref
->type
= var
->type
;
977 nir_ssa_dest_init(&deref
->instr
, &deref
->dest
, 1,
978 nir_get_ptr_bitsize(build
), NULL
);
980 nir_builder_instr_insert(build
, &deref
->instr
);
985 static inline nir_deref_instr
*
986 nir_build_deref_array(nir_builder
*build
, nir_deref_instr
*parent
,
989 assert(glsl_type_is_array(parent
->type
) ||
990 glsl_type_is_matrix(parent
->type
) ||
991 glsl_type_is_vector(parent
->type
));
993 assert(index
->bit_size
== parent
->dest
.ssa
.bit_size
);
995 nir_deref_instr
*deref
=
996 nir_deref_instr_create(build
->shader
, nir_deref_type_array
);
998 deref
->mode
= parent
->mode
;
999 deref
->type
= glsl_get_array_element(parent
->type
);
1000 deref
->parent
= nir_src_for_ssa(&parent
->dest
.ssa
);
1001 deref
->arr
.index
= nir_src_for_ssa(index
);
1003 nir_ssa_dest_init(&deref
->instr
, &deref
->dest
,
1004 parent
->dest
.ssa
.num_components
,
1005 parent
->dest
.ssa
.bit_size
, NULL
);
1007 nir_builder_instr_insert(build
, &deref
->instr
);
1012 static inline nir_deref_instr
*
1013 nir_build_deref_array_imm(nir_builder
*build
, nir_deref_instr
*parent
,
1016 assert(parent
->dest
.is_ssa
);
1017 nir_ssa_def
*idx_ssa
= nir_imm_intN_t(build
, index
,
1018 parent
->dest
.ssa
.bit_size
);
1020 return nir_build_deref_array(build
, parent
, idx_ssa
);
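
/* Usage sketch (editor's illustration): building the deref chain for
 * "var[i]" where `var` is an array-typed nir_variable and `i` is an SSA
 * index of possibly different bit size:
 *
 *    nir_deref_instr *d = nir_build_deref_var(&b, var);
 *    d = nir_build_deref_array(&b, d,
 *                              nir_i2i(&b, i, d->dest.ssa.bit_size));
 */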
static inline nir_deref_instr *
nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
                             nir_ssa_def *index)
{
   assert(parent->deref_type == nir_deref_type_array ||
          parent->deref_type == nir_deref_type_ptr_as_array ||
          parent->deref_type == nir_deref_type_cast);

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);

   deref->mode = parent->mode;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);

   deref->mode = parent->mode;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}

static inline nir_deref_instr *
nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
                       unsigned index)
{
   assert(glsl_type_is_struct_or_ifc(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_struct);

   deref->mode = parent->mode;
   deref->type = glsl_get_struct_field(parent->type, index);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->strct.index = index;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
                     nir_variable_mode mode, const struct glsl_type *type,
                     unsigned ptr_stride)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->mode = mode;
   deref->type = type;
   deref->parent = nir_src_for_ssa(parent);
   deref->cast.ptr_stride = ptr_stride;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->num_components, parent->bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
/** Returns a deref that follows another but starting from the given parent
 *
 * The new deref will be the same type and take the same array or struct index
 * as the leader deref but it may have a different parent.  This is very
 * useful for walking deref paths.
 */
static inline nir_deref_instr *
nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                         nir_deref_instr *leader)
{
   /* If the derefs would have the same parent, don't make a new one */
   assert(leader->parent.is_ssa);
   if (leader->parent.ssa == &parent->dest.ssa)
      return leader;

   UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);

   switch (leader->deref_type) {
   case nir_deref_type_var:
      unreachable("A var dereference cannot have a parent");
      break;

   case nir_deref_type_array:
   case nir_deref_type_array_wildcard:
      assert(glsl_type_is_matrix(parent->type) ||
             glsl_type_is_array(parent->type) ||
             (leader->deref_type == nir_deref_type_array &&
              glsl_type_is_vector(parent->type)));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      if (leader->deref_type == nir_deref_type_array) {
         assert(leader->arr.index.is_ssa);
         nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
                                      parent->dest.ssa.bit_size);
         return nir_build_deref_array(b, parent, index);
      } else {
         return nir_build_deref_array_wildcard(b, parent);
      }

   case nir_deref_type_struct:
      assert(glsl_type_is_struct_or_ifc(parent->type));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      return nir_build_deref_struct(b, parent, leader->strct.index);

   default:
      unreachable("Invalid deref instruction type");
   }
}
static inline nir_ssa_def *
nir_load_reg(nir_builder *build, nir_register *reg)
{
   return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
}

static inline void
nir_store_reg(nir_builder *build, nir_register *reg,
              nir_ssa_def *def, nir_component_mask_t write_mask)
{
   assert(reg->num_components == def->num_components);
   assert(reg->bit_size == def->bit_size);

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
   nir_builder_instr_insert(build, &mov->instr);
}
static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                           enum gl_access_qualifier access)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
   load->num_components = glsl_get_vector_elements(deref->type);
   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
                     glsl_get_bit_size(deref->type), NULL);
   nir_intrinsic_set_access(load, access);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline nir_ssa_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
   return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
}

static inline void
nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                            nir_ssa_def *value, unsigned writemask,
                            enum gl_access_qualifier access)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
   store->num_components = glsl_get_vector_elements(deref->type);
   store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   store->src[1] = nir_src_for_ssa(value);
   nir_intrinsic_set_write_mask(store,
                                writemask & ((1 << store->num_components) - 1));
   nir_intrinsic_set_access(store, access);
   nir_builder_instr_insert(build, &store->instr);
}

static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
                nir_ssa_def *value, unsigned writemask)
{
   nir_store_deref_with_access(build, deref, value, writemask,
                               (enum gl_access_qualifier)0);
}
static inline void
nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
                           nir_deref_instr *src,
                           enum gl_access_qualifier dest_access,
                           enum gl_access_qualifier src_access)
{
   nir_intrinsic_instr *copy =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
   copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
   copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
   nir_intrinsic_set_dst_access(copy, dest_access);
   nir_intrinsic_set_src_access(copy, src_access);
   nir_builder_instr_insert(build, &copy->instr);
}

static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
   nir_copy_deref_with_access(build, dest, src,
                              (enum gl_access_qualifier) 0,
                              (enum gl_access_qualifier) 0);
}
static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
   return nir_load_deref(build, nir_build_deref_var(build, var));
}

static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
              unsigned writemask)
{
   nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
}

static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
   nir_copy_deref(build, nir_build_deref_var(build, dest),
                  nir_build_deref_var(build, src));
}
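
/* Usage sketch (editor's illustration): reading and writing variables through
 * the deref-based helpers, assuming `src_var` and `dst_var` have the same
 * vector type:
 *
 *    nir_ssa_def *val = nir_load_var(&b, src_var);
 *    nir_store_var(&b, dst_var, val, 0x1);   // writemask: .x only
 */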
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
   assert(param_idx < build->impl->function->num_params);
   nir_parameter *param = &build->impl->function->params[param_idx];

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
   nir_intrinsic_set_param_idx(load, param_idx);
   load->num_components = param->num_components;
   nir_ssa_dest_init(&load->instr, &load->dest,
                     param->num_components, param->bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}
static inline nir_ssa_def *
nir_f2b(nir_builder *build, nir_ssa_def *f)
{
   return nir_f2b1(build, f);
}

static inline nir_ssa_def *
nir_i2b(nir_builder *build, nir_ssa_def *i)
{
   return nir_i2b1(build, i);
}

static inline nir_ssa_def *
nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2f64(build, b);
   case 32: return nir_b2f32(build, b);
   case 16: return nir_b2f16(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}

static inline nir_ssa_def *
nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2i64(build, b);
   case 32: return nir_b2i32(build, b);
   case 16: return nir_b2i16(build, b);
   case 8:  return nir_b2i8(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}
static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                     unsigned interp_mode)
{
   unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
   nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
}
static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
   assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto(nir_builder *build, struct nir_block *target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
   jump->target = target;
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
            struct nir_block *else_target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
   jump->condition = cond;
   jump->target = target;
   jump->else_target = else_target;
   nir_builder_instr_insert(build, &jump->instr);
}
static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fne(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}
static inline void
nir_scoped_barrier(nir_builder *b,
                   nir_scope exec_scope,
                   nir_scope mem_scope,
                   nir_memory_semantics mem_semantics,
                   nir_variable_mode mem_modes)
{
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
   nir_intrinsic_set_execution_scope(intrin, exec_scope);
   nir_intrinsic_set_memory_scope(intrin, mem_scope);
   nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
   nir_intrinsic_set_memory_modes(intrin, mem_modes);
   nir_builder_instr_insert(b, &intrin->instr);
}

static inline void
nir_scoped_memory_barrier(nir_builder *b,
                          nir_scope scope,
                          nir_memory_semantics semantics,
                          nir_variable_mode modes)
{
   nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
}
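
/* Usage sketch (editor's illustration): a workgroup-scoped control + memory
 * barrier over shared memory, roughly GLSL barrier() plus
 * memoryBarrierShared():
 *
 *    nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
 *                       NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
 */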
static inline nir_ssa_def *
nir_convert_to_bit_size(nir_builder *b,
                        nir_ssa_def *src,
                        nir_alu_type type,
                        unsigned bit_size)
{
   nir_alu_type base_type = nir_alu_type_get_base_type(type);
   nir_alu_type dst_type = (nir_alu_type)(bit_size | base_type);

   nir_op opcode =
      nir_type_conversion_op(type, dst_type, nir_rounding_mode_undef);

   return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
}

static inline nir_ssa_def *
nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
}

static inline nir_ssa_def *
nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
}

static inline nir_ssa_def *
nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
}

static inline nir_ssa_def *
nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
}
#endif /* NIR_BUILDER_H */