/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H

#include "nir_control_flow.h"
#include "util/bitscan.h"
#include "util/half_float.h"
typedef struct nir_builder {
   nir_cursor cursor;

   /* Whether new ALU instructions will be marked "exact" */
   bool exact;

   nir_shader *shader;
   nir_function_impl *impl;
} nir_builder;
static inline void
nir_builder_init(nir_builder *build, nir_function_impl *impl)
{
   memset(build, 0, sizeof(*build));
   build->exact = false;
   build->impl = impl;
   build->shader = impl->function->shader;
}
static inline void
nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
                               gl_shader_stage stage,
                               const nir_shader_compiler_options *options)
{
   build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
   nir_function *func = nir_function_create(build->shader, "main");
   func->is_entrypoint = true;
   build->exact = false;
   build->impl = nir_function_impl_create(func);
   build->cursor = nir_after_cf_list(&build->impl->body);
}
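/* Usage sketch (illustrative, not part of the original header): set up a
 * builder on a fresh single-function shader and emit one immediate.  The
 * `options` pointer is an assumption here; it must reference valid
 * nir_shader_compiler_options for the target.
 *
 *    nir_builder b;
 *    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
 *    nir_ssa_def *half = nir_imm_float(&b, 0.5f);
 *    ...
 *    ralloc_free(b.shader);
 */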
static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}
static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
{
   assert(build->cursor.option == nir_cursor_after_instr);
   return build->cursor.instr;
}
static inline void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}
static inline bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}
static inline nir_if *
nir_push_if_src(nir_builder *build, nir_src condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = condition;
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}
static inline nir_if *
nir_push_if(nir_builder *build, nir_ssa_def *condition)
{
   return nir_push_if_src(build, nir_src_for_ssa(condition));
}
static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }

   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}
static inline void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }

   build->cursor = nir_after_cf_node(&nif->cf_node);
}
static inline nir_ssa_def *
nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);

   nir_phi_src *src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_then_block(nif);
   src->src = nir_src_for_ssa(then_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_else_block(nif);
   src->src = nir_src_for_ssa(else_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_ssa_dest_init(&phi->instr, &phi->dest,
                     then_def->num_components, then_def->bit_size, NULL);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->dest.ssa;
}
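/* Usage sketch (illustrative): the push/pop helpers pair with nir_if_phi()
 * to merge a value across control flow.  Here `x` stands for any 32-bit
 * float scalar def the caller already has; this builds abs(x) by hand.
 *
 *    nir_push_if(&b, nir_flt(&b, x, nir_imm_float(&b, 0.0f)));
 *    nir_ssa_def *t = nir_fneg(&b, x);
 *    nir_push_else(&b, NULL);
 *    nir_ssa_def *e = x;
 *    nir_pop_if(&b, NULL);
 *    nir_ssa_def *absolute = nir_if_phi(&b, t, e);
 */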
static inline nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}
static inline void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }

   build->cursor = nir_after_cf_node(&loop->cf_node);
}
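/* Usage sketch (illustrative): NIR loops run forever until a break is
 * emitted, so a counted loop is an if + nir_jump_break in the body.
 * `counter` is a hypothetical nir_variable the caller created.
 *
 *    nir_loop *loop = nir_push_loop(&b);
 *    nir_ssa_def *count = nir_load_var(&b, counter);
 *    nir_push_if(&b, nir_ige(&b, count, nir_imm_int(&b, 10)));
 *    nir_jump(&b, nir_jump_break);
 *    nir_pop_if(&b, NULL);
 *    nir_store_var(&b, counter, nir_iadd_imm(&b, count, 1), 0x1);
 *    nir_pop_loop(&b, loop);
 */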
static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
   if (!undef)
      return NULL;

   nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);

   return &undef->def;
}
static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
              unsigned bit_size, const nir_const_value *value)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);
   if (!load_const)
      return NULL;

   memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}
static inline nir_ssa_def *
nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);

   /* nir_load_const_instr_create uses rzalloc so it's already zero */

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}
static inline nir_ssa_def *
nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_bool(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_bool(nir_builder *build, bool x)
{
   return nir_imm_boolN_t(build, x, 1);
}

static inline nir_ssa_def *
nir_imm_true(nir_builder *build)
{
   return nir_imm_bool(build, true);
}

static inline nir_ssa_def *
nir_imm_false(nir_builder *build)
{
   return nir_imm_bool(build, false);
}
static inline nir_ssa_def *
nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_float(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_float16(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 16);
}

static inline nir_ssa_def *
nir_imm_float(nir_builder *build, float x)
{
   return nir_imm_floatN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_double(nir_builder *build, double x)
{
   return nir_imm_floatN_t(build, x, 64);
}
static inline nir_ssa_def *
nir_imm_vec2(nir_builder *build, float x, float y)
{
   nir_const_value v[2] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
   };

   return nir_build_imm(build, 2, 32, v);
}
static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 32),
      nir_const_value_for_float(y, 32),
      nir_const_value_for_float(z, 32),
      nir_const_value_for_float(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}

static inline nir_ssa_def *
nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v[4] = {
      nir_const_value_for_float(x, 16),
      nir_const_value_for_float(y, 16),
      nir_const_value_for_float(z, 16),
      nir_const_value_for_float(w, 16),
   };

   return nir_build_imm(build, 4, 16, v);
}
static inline nir_ssa_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
   nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
   return nir_build_imm(build, 1, bit_size, &v);
}

static inline nir_ssa_def *
nir_imm_int(nir_builder *build, int x)
{
   return nir_imm_intN_t(build, x, 32);
}

static inline nir_ssa_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
   return nir_imm_intN_t(build, x, 64);
}
static inline nir_ssa_def *
nir_imm_ivec2(nir_builder *build, int x, int y)
{
   nir_const_value v[2] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
   };

   return nir_build_imm(build, 2, 32, v);
}

static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
   nir_const_value v[4] = {
      nir_const_value_for_int(x, 32),
      nir_const_value_for_int(y, 32),
      nir_const_value_for_int(z, 32),
      nir_const_value_for_int(w, 32),
   };

   return nir_build_imm(build, 4, 32, v);
}
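/* Usage sketch (illustrative): every nir_imm_* helper above bottoms out in
 * nir_build_imm() and returns a load_const def inserted at the cursor.
 *
 *    nir_ssa_def *zero  = nir_imm_int(&b, 0);
 *    nir_ssa_def *scale = nir_imm_vec4(&b, 1.0f, 2.0f, 4.0f, 8.0f);
 *    nir_ssa_def *half  = nir_imm_floatN_t(&b, 0.5, 16);   // 16-bit 0.5
 */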
static inline nir_ssa_def *
nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
{
   const nir_op_info *op_info = &nir_op_infos[instr->op];

   instr->exact = build->exact;

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
      if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
         if (bit_size)
            assert(src_bit_size == bit_size);
         else
            bit_size = src_bit_size;
      } else {
         assert(src_bit_size ==
                nir_alu_type_get_type_size(op_info->input_types[i]));
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components;
           j < NIR_MAX_VEC_COMPONENTS; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
                     bit_size, NULL);
   instr->dest.write_mask = (1 << num_components) - 1;

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->dest.dest.ssa;
}
static inline nir_ssa_def *
nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
{
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}
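/* Usage sketch (illustrative): nir_build_alu() is the raw entry point; the
 * per-opcode wrappers generated into nir_builder_opcodes.h expand to calls
 * like this, so the two spellings below emit the same fadd instruction.
 *
 *    nir_ssa_def *sum1 = nir_build_alu(&b, nir_op_fadd, x, y, NULL, NULL);
 *    nir_ssa_def *sum2 = nir_fadd(&b, x, y);
 */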
/* for the couple special cases with more than 4 src args: */
static inline nir_ssa_def *
nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   for (unsigned i = 0; i < op_info->num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(srcs[i]);

   return nir_builder_alu_instr_finish_and_insert(build, instr);
}
#include "nir_builder_opcodes.h"
static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
{
   return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
}
static inline nir_ssa_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   assert(!src.abs && !src.negate);
   if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
      bool any_swizzles = false;
      for (unsigned i = 0; i < num_components; i++) {
         if (src.swizzle[i] != i)
            any_swizzles = true;
      }
      if (!any_swizzles)
         return src.src.ssa;
   }

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}
/**
 * Construct a mov that reswizzles the source's components.
 */
static inline nir_ssa_def *
nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
            unsigned num_components)
{
   assert(num_components <= NIR_MAX_VEC_COMPONENTS);
   nir_alu_src alu_src = { NIR_SRC_INIT };
   alu_src.src = nir_src_for_ssa(src);

   bool is_identity_swizzle = true;
   for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
      if (swiz[i] != i)
         is_identity_swizzle = false;
      alu_src.swizzle[i] = swiz[i];
   }

   if (num_components == src->num_components && is_identity_swizzle)
      return src;

   return nir_mov_alu(build, alu_src, num_components);
}
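/* Usage sketch (illustrative): reverse the components of a vec4.  A full
 * identity swizzle is returned as-is, so this only costs a mov when it
 * actually reorders something.
 *
 *    static const unsigned wzyx[] = { 3, 2, 1, 0 };
 *    nir_ssa_def *reversed = nir_swizzle(&b, vec, wzyx, 4);
 */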
/* Selects the right fdot given the number of components in each source. */
static inline nir_ssa_def *
nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   assert(src0->num_components == src1->num_components);
   switch (src0->num_components) {
   case 1: return nir_fmul(build, src0, src1);
   case 2: return nir_fdot2(build, src0, src1);
   case 3: return nir_fdot3(build, src0, src1);
   case 4: return nir_fdot4(build, src0, src1);
   case 8: return nir_fdot8(build, src0, src1);
   case 16: return nir_fdot16(build, src0, src1);
   default:
      unreachable("bad component size");
   }

   return NULL;
}
static inline nir_ssa_def *
nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ieq(b, src0, src1);
   case 2: return nir_ball_iequal2(b, src0, src1);
   case 3: return nir_ball_iequal3(b, src0, src1);
   case 4: return nir_ball_iequal4(b, src0, src1);
   case 8: return nir_ball_iequal8(b, src0, src1);
   case 16: return nir_ball_iequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_ball(nir_builder *b, nir_ssa_def *src)
{
   return nir_ball_iequal(b, src, nir_imm_true(b));
}
static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ine(b, src0, src1);
   case 2: return nir_bany_inequal2(b, src0, src1);
   case 3: return nir_bany_inequal3(b, src0, src1);
   case 4: return nir_bany_inequal4(b, src0, src1);
   case 8: return nir_bany_inequal8(b, src0, src1);
   case 16: return nir_bany_inequal16(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_bany(nir_builder *b, nir_ssa_def *src)
{
   return nir_bany_inequal(b, src, nir_imm_false(b));
}
static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
   return nir_swizzle(b, def, &c, 1);
}
static inline nir_ssa_def *
nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
{
   unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      if ((mask & (1 << i)) == 0)
         continue;
      swizzle[num_channels++] = i;
   }

   return nir_swizzle(b, def, swizzle, num_channels);
}
static inline nir_ssa_def *
_nir_vector_extract_helper(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c,
                           unsigned start, unsigned end)
{
   if (start == end - 1) {
      return nir_channel(b, vec, start);
   } else {
      unsigned mid = start + (end - start) / 2;
      return nir_bcsel(b, nir_ilt(b, c, nir_imm_intN_t(b, mid, c->bit_size)),
                       _nir_vector_extract_helper(b, vec, c, start, mid),
                       _nir_vector_extract_helper(b, vec, c, mid, end));
   }
}
static inline nir_ssa_def *
nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
{
   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_channel(b, vec, c_const);
      else
         return nir_ssa_undef(b, 1, vec->bit_size);
   } else {
      return _nir_vector_extract_helper(b, vec, c, 0, vec->num_components);
   }
}
/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
                      nir_ssa_def *scalar, unsigned c)
{
   assert(scalar->num_components == 1);
   assert(c < vec->num_components);

   nir_op vec_op = nir_op_vec(vec->num_components);
   nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);

   for (unsigned i = 0; i < vec->num_components; i++) {
      if (i == c) {
         vec_instr->src[i].src = nir_src_for_ssa(scalar);
         vec_instr->src[i].swizzle[0] = 0;
      } else {
         vec_instr->src[i].src = nir_src_for_ssa(vec);
         vec_instr->src[i].swizzle[0] = i;
      }
   }

   return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
}
/** Replaces the component of `vec` specified by `c` with `scalar` */
static inline nir_ssa_def *
nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
                  nir_ssa_def *c)
{
   assert(scalar->num_components == 1);
   assert(c->num_components == 1);

   nir_src c_src = nir_src_for_ssa(c);
   if (nir_src_is_const(c_src)) {
      uint64_t c_const = nir_src_as_uint(c_src);
      if (c_const < vec->num_components)
         return nir_vector_insert_imm(b, vec, scalar, c_const);
      else
         return vec;
   } else {
      nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
      nir_ssa_def *per_comp_idx =
         nir_build_imm(b, vec->num_components,
                       c->bit_size, per_comp_idx_const);

      /* nir_builder will automatically splat out scalars to vectors so an
       * insert is as simple as "if I'm the channel, replace me with the
       * scalar."
       */
      return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
   }
}
static inline nir_ssa_def *
nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_i2i64(build, x);
   case 32: return nir_i2i32(build, x);
   case 16: return nir_i2i16(build, x);
   case 8:  return nir_i2i8(build, x);
   default: unreachable("Invalid bit size");
   }
}

static inline nir_ssa_def *
nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
   if (x->bit_size == dest_bit_size)
      return x;

   switch (dest_bit_size) {
   case 64: return nir_u2u64(build, x);
   case 32: return nir_u2u32(build, x);
   case 16: return nir_u2u16(build, x);
   case 8:  return nir_u2u8(build, x);
   default: unreachable("Invalid bit size");
   }
}
static inline nir_ssa_def *
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return x;
   } else {
      return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}
static inline nir_ssa_def *
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == 1) {
      return x;
   } else if (!build->shader->options->lower_bitops &&
              util_is_power_of_two_or_zero64(y)) {
      return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
   } else if (amul) {
      return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   } else {
      return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}

static inline nir_ssa_def *
nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, false);
}

static inline nir_ssa_def *
nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   return _nir_mul_imm(build, x, y, true);
}
static inline nir_ssa_def *
nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}

static inline nir_ssa_def *
nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
{
   return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
static inline nir_ssa_def *
nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 0) {
      return nir_imm_intN_t(build, 0, x->bit_size);
   } else if (y == BITFIELD64_MASK(x->bit_size)) {
      return x;
   } else {
      return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}
static inline nir_ssa_def *
nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ishr(build, x, nir_imm_int(build, y));
   }
}

static inline nir_ssa_def *
nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
{
   if (y == 0) {
      return x;
   } else {
      return nir_ushr(build, x, nir_imm_int(build, y));
   }
}
static inline nir_ssa_def *
nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
   assert(x->bit_size <= 64);
   y &= BITFIELD64_MASK(x->bit_size);

   if (y == 1) {
      return x;
   } else if (util_is_power_of_two_nonzero(y)) {
      return nir_ushr_imm(build, x, ffsll(y) - 1);
   } else {
      return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
   }
}
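/* Usage sketch (illustrative): the *_imm helpers fold trivial constants
 * instead of emitting dead ALU ops; multiplying by a power of two becomes a
 * shift unless the options forbid lowering to bit ops.
 *
 *    nir_ssa_def *base   = nir_imul_imm(&b, index, 16);  // emits ishl by 4
 *    nir_ssa_def *offset = nir_iadd_imm(&b, base, 0);    // just returns base
 */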
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components * src->bit_size == dest_bit_size);

   switch (dest_bit_size) {
   case 64:
      switch (src->bit_size) {
      case 32: return nir_pack_64_2x32(b, src);
      case 16: return nir_pack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (src->bit_size == 16)
         return nir_pack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated pack opcode. */
   nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
      val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
      dest = nir_ior(b, dest, val);
   }

   return dest;
}
static inline nir_ssa_def *
nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert(src->num_components == 1);
   assert(src->bit_size > dest_bit_size);
   const unsigned dest_num_components = src->bit_size / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   switch (src->bit_size) {
   case 64:
      switch (dest_bit_size) {
      case 32: return nir_unpack_64_2x32(b, src);
      case 16: return nir_unpack_64_4x16(b, src);
      default: break;
      }
      break;

   case 32:
      if (dest_bit_size == 16)
         return nir_unpack_32_2x16(b, src);
      break;

   default:
      break;
   }

   /* If we got here, we have no dedicated unpack opcode. */
   nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < dest_num_components; i++) {
      nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
      dest_comps[i] = nir_u2u(b, val, dest_bit_size);
   }
   return nir_vec(b, dest_comps, dest_num_components);
}
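/* Usage sketch (illustrative): pack and unpack are inverses; a 32-bit
 * scalar round-trips through a vec2 of 16-bit values via the dedicated
 * opcodes above.
 *
 *    nir_ssa_def *halves = nir_unpack_bits(&b, value32, 16);  // vec2 of u16
 *    nir_ssa_def *again  = nir_pack_bits(&b, halves, 32);
 */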
/**
 * Treats srcs as if it's one big blob of bits and extracts the range of bits
 * given by
 *
 *    [first_bit, first_bit + dest_num_components * dest_bit_size)
 *
 * The range can have any alignment or size as long as it's an integer number
 * of destination components and fits inside the concatenated sources.
 *
 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
 * values are involved because that would require pack/unpack to/from a vec8
 * which NIR currently does not support.
 */
static inline nir_ssa_def *
nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
                 unsigned first_bit,
                 unsigned dest_num_components, unsigned dest_bit_size)
{
   const unsigned num_bits = dest_num_components * dest_bit_size;

   /* Figure out the common bit size */
   unsigned common_bit_size = dest_bit_size;
   for (unsigned i = 0; i < num_srcs; i++)
      common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
   if (first_bit > 0)
      common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));

   /* We don't want to have to deal with 1-bit values */
   assert(common_bit_size >= 8);

   nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
   assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));

   /* First, unpack to the common bit size and select the components from the
    * source.
    */
   int src_idx = -1;
   unsigned src_start_bit = 0;
   unsigned src_end_bit = 0;
   for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
      const unsigned bit = first_bit + (i * common_bit_size);
      while (bit >= src_end_bit) {
         src_idx++;
         assert(src_idx < (int) num_srcs);
         src_start_bit = src_end_bit;
         src_end_bit += srcs[src_idx]->bit_size *
                        srcs[src_idx]->num_components;
      }
      assert(bit >= src_start_bit);
      assert(bit + common_bit_size <= src_end_bit);
      const unsigned rel_bit = bit - src_start_bit;
      const unsigned src_bit_size = srcs[src_idx]->bit_size;

      nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
                                      rel_bit / src_bit_size);
      if (srcs[src_idx]->bit_size > common_bit_size) {
         nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
         comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
                                         common_bit_size);
      }
      common_comps[i] = comp;
   }

   /* Now, re-pack the destination if we have to */
   if (dest_bit_size > common_bit_size) {
      unsigned common_per_dest = dest_bit_size / common_bit_size;
      nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
      for (unsigned i = 0; i < dest_num_components; i++) {
         nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
                                         common_per_dest);
         dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
      }
      return nir_vec(b, dest_comps, dest_num_components);
   } else {
      assert(dest_bit_size == common_bit_size);
      return nir_vec(b, common_comps, dest_num_components);
   }
}
static inline nir_ssa_def *
nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
   assert((src->bit_size * src->num_components) % dest_bit_size == 0);
   const unsigned dest_num_components =
      (src->bit_size * src->num_components) / dest_bit_size;
   assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);

   return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
}
/**
 * Turns a nir_src into a nir_ssa_def * so it can be passed to
 * nir_build_alu()-based builder calls.
 *
 * See nir_ssa_for_alu_src() for alu instructions.
 */
static inline nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
   if (src.is_ssa && src.ssa->num_components == num_components)
      return src.ssa;

   nir_alu_src alu = { NIR_SRC_INIT };
   alu.src = src;
   for (int j = 0; j < 4; j++)
      alu.swizzle[j] = j;

   return nir_mov_alu(build, alu, num_components);
}
/**
 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
 * nir_alu_src's swizzle.
 */
static inline nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   static uint8_t trivial_swizzle[] =
      { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
   STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);

   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);

   if (src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
       !src->abs && !src->negate &&
       (memcmp(src->swizzle, trivial_swizzle, num_components) == 0))
      return src->src.ssa;

   return nir_mov_alu(build, *src, num_components);
}
static inline unsigned
nir_get_ptr_bitsize(nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_KERNEL)
      return shader->info.cs.ptr_size;
   return 32;
}
static inline nir_deref_instr *
nir_build_deref_var(nir_builder *build, nir_variable *var)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_var);

   deref->mode = (nir_variable_mode)var->data.mode;
   deref->type = var->type;
   deref->var = var;

   nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
                     nir_get_ptr_bitsize(build->shader), NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
                      nir_ssa_def *index)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type) ||
          glsl_type_is_vector(parent->type));

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array);

   deref->mode = parent->mode;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                          int64_t index)
{
   assert(parent->dest.is_ssa);
   nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
                                         parent->dest.ssa.bit_size);

   return nir_build_deref_array(build, parent, idx_ssa);
}
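/* Usage sketch (illustrative): deref chains start at a variable and walk
 * down the type.  `arr` is a hypothetical array variable created by the
 * caller; loading element 3 looks like:
 *
 *    nir_deref_instr *base = nir_build_deref_var(&b, arr);
 *    nir_deref_instr *elem = nir_build_deref_array_imm(&b, base, 3);
 *    nir_ssa_def *value = nir_load_deref(&b, elem);
 */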
static inline nir_deref_instr *
nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
                             nir_ssa_def *index)
{
   assert(parent->deref_type == nir_deref_type_array ||
          parent->deref_type == nir_deref_type_ptr_as_array ||
          parent->deref_type == nir_deref_type_cast);

   assert(index->bit_size == parent->dest.ssa.bit_size);

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);

   deref->mode = parent->mode;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
{
   assert(glsl_type_is_array(parent->type) ||
          glsl_type_is_matrix(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);

   deref->mode = parent->mode;
   deref->type = glsl_get_array_element(parent->type);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
                       unsigned index)
{
   assert(glsl_type_is_struct_or_ifc(parent->type));

   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_struct);

   deref->mode = parent->mode;
   deref->type = glsl_get_struct_field(parent->type, index);
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->strct.index = index;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
static inline nir_deref_instr *
nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
                     nir_variable_mode mode, const struct glsl_type *type,
                     unsigned ptr_stride)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(build->shader, nir_deref_type_cast);

   deref->mode = mode;
   deref->type = type;
   deref->parent = nir_src_for_ssa(parent);
   deref->cast.ptr_stride = ptr_stride;

   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->num_components, parent->bit_size, NULL);

   nir_builder_instr_insert(build, &deref->instr);

   return deref;
}
/** Returns a deref that follows another but starting from the given parent
 *
 * The new deref will be the same type and take the same array or struct index
 * as the leader deref but it may have a different parent.  This is very
 * useful for walking deref paths.
 */
static inline nir_deref_instr *
nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                         nir_deref_instr *leader)
{
   /* If the derefs would have the same parent, don't make a new one */
   assert(leader->parent.is_ssa);
   if (leader->parent.ssa == &parent->dest.ssa)
      return leader;

   UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);

   switch (leader->deref_type) {
   case nir_deref_type_var:
      unreachable("A var dereference cannot have a parent");
      break;

   case nir_deref_type_array:
   case nir_deref_type_array_wildcard:
      assert(glsl_type_is_matrix(parent->type) ||
             glsl_type_is_array(parent->type) ||
             (leader->deref_type == nir_deref_type_array &&
              glsl_type_is_vector(parent->type)));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      if (leader->deref_type == nir_deref_type_array) {
         assert(leader->arr.index.is_ssa);
         nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
                                         parent->dest.ssa.bit_size);
         return nir_build_deref_array(b, parent, index);
      } else {
         return nir_build_deref_array_wildcard(b, parent);
      }

   case nir_deref_type_struct:
      assert(glsl_type_is_struct_or_ifc(parent->type));
      assert(glsl_get_length(parent->type) ==
             glsl_get_length(leader_parent->type));

      return nir_build_deref_struct(b, parent, leader->strct.index);

   default:
      unreachable("Invalid deref instruction type");
   }
}
static inline nir_ssa_def *
nir_load_reg(nir_builder *build, nir_register *reg)
{
   return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
}
static inline void
nir_store_reg(nir_builder *build, nir_register *reg,
              nir_ssa_def *def, nir_component_mask_t write_mask)
{
   assert(reg->num_components == def->num_components);
   assert(reg->bit_size == def->bit_size);

   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
   nir_builder_instr_insert(build, &mov->instr);
}
static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                           enum gl_access_qualifier access)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
   load->num_components = glsl_get_vector_elements(deref->type);
   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
                     glsl_get_bit_size(deref->type), NULL);
   nir_intrinsic_set_access(load, access);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline nir_ssa_def *
nir_load_deref(nir_builder *build, nir_deref_instr *deref)
{
   return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
}
static inline void
nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                            nir_ssa_def *value, unsigned writemask,
                            enum gl_access_qualifier access)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
   store->num_components = glsl_get_vector_elements(deref->type);
   store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
   store->src[1] = nir_src_for_ssa(value);
   nir_intrinsic_set_write_mask(store,
                                writemask & ((1 << store->num_components) - 1));
   nir_intrinsic_set_access(store, access);
   nir_builder_instr_insert(build, &store->instr);
}

static inline void
nir_store_deref(nir_builder *build, nir_deref_instr *deref,
                nir_ssa_def *value, unsigned writemask)
{
   nir_store_deref_with_access(build, deref, value, writemask,
                               (enum gl_access_qualifier)0);
}
static inline void
nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
                           nir_deref_instr *src,
                           enum gl_access_qualifier dest_access,
                           enum gl_access_qualifier src_access)
{
   nir_intrinsic_instr *copy =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
   copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
   copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
   nir_intrinsic_set_dst_access(copy, dest_access);
   nir_intrinsic_set_src_access(copy, src_access);
   nir_builder_instr_insert(build, &copy->instr);
}

static inline void
nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
{
   nir_copy_deref_with_access(build, dest, src,
                              (enum gl_access_qualifier) 0,
                              (enum gl_access_qualifier) 0);
}
static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
   return nir_load_deref(build, nir_build_deref_var(build, var));
}

static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
              unsigned writemask)
{
   nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
}

static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
   nir_copy_deref(build, nir_build_deref_var(build, dest),
                         nir_build_deref_var(build, src));
}
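/* Usage sketch (illustrative): the *_var helpers wrap the variable in a
 * fresh deref on every call, so a read-modify-write of a hypothetical
 * float variable `v` is simply:
 *
 *    nir_ssa_def *old = nir_load_var(&b, v);
 *    nir_store_var(&b, v, nir_fadd_imm(&b, old, 1.0), 0x1);
 */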
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
   assert(param_idx < build->impl->function->num_params);
   nir_parameter *param = &build->impl->function->params[param_idx];

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
   nir_intrinsic_set_param_idx(load, param_idx);
   load->num_components = param->num_components;
   nir_ssa_dest_init(&load->instr, &load->dest,
                     param->num_components, param->bit_size, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}
#include "nir_builder_opcodes.h"
static inline nir_ssa_def *
nir_f2b(nir_builder *build, nir_ssa_def *f)
{
   return nir_f2b1(build, f);
}

static inline nir_ssa_def *
nir_i2b(nir_builder *build, nir_ssa_def *i)
{
   return nir_i2b1(build, i);
}

static inline nir_ssa_def *
nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2f64(build, b);
   case 32: return nir_b2f32(build, b);
   case 16: return nir_b2f16(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}

static inline nir_ssa_def *
nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
{
   switch (bit_size) {
   case 64: return nir_b2i64(build, b);
   case 32: return nir_b2i32(build, b);
   case 16: return nir_b2i16(build, b);
   case 8:  return nir_b2i8(build, b);
   default:
      unreachable("Invalid bit-size");
   }
}
static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                     unsigned interp_mode)
{
   unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
   nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
}
static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
   assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto(nir_builder *build, struct nir_block *target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
   jump->target = target;
   nir_builder_instr_insert(build, &jump->instr);
}

static inline void
nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
            struct nir_block *else_target)
{
   assert(!build->impl->structured);
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
   jump->condition = cond;
   jump->target = target;
   jump->else_target = else_target;
   nir_builder_instr_insert(build, &jump->instr);
}
static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fneu(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}
static inline void
nir_scoped_barrier(nir_builder *b,
                   nir_scope exec_scope,
                   nir_scope mem_scope,
                   nir_memory_semantics mem_semantics,
                   nir_variable_mode mem_modes)
{
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
   nir_intrinsic_set_execution_scope(intrin, exec_scope);
   nir_intrinsic_set_memory_scope(intrin, mem_scope);
   nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
   nir_intrinsic_set_memory_modes(intrin, mem_modes);
   nir_builder_instr_insert(b, &intrin->instr);
}

static inline void
nir_scoped_memory_barrier(nir_builder *b,
                          nir_scope scope,
                          nir_memory_semantics semantics,
                          nir_variable_mode modes)
{
   nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
}
static inline nir_ssa_def *
nir_convert_to_bit_size(nir_builder *b,
                        nir_ssa_def *src,
                        nir_alu_type type,
                        unsigned bit_size)
{
   nir_alu_type base_type = nir_alu_type_get_base_type(type);
   nir_alu_type dst_type = (nir_alu_type)(bit_size | base_type);

   nir_op opcode =
      nir_type_conversion_op(type, dst_type, nir_rounding_mode_undef);

   return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
}
static inline nir_ssa_def *
nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
}

static inline nir_ssa_def *
nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
}

static inline nir_ssa_def *
nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
}

static inline nir_ssa_def *
nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
{
   return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
}
#endif /* NIR_BUILDER_H */