1 /*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef NIR_BUILDER_H
25 #define NIR_BUILDER_H
26
27 #include "nir_control_flow.h"
28 #include "util/bitscan.h"
29 #include "util/half_float.h"
30
31 struct exec_list;
32
33 typedef struct nir_builder {
34 nir_cursor cursor;
35
36 /* Whether new ALU instructions will be marked "exact" */
37 bool exact;
38
39 nir_shader *shader;
40 nir_function_impl *impl;
41 } nir_builder;
42
43 static inline void
44 nir_builder_init(nir_builder *build, nir_function_impl *impl)
45 {
46 memset(build, 0, sizeof(*build));
47 build->exact = false;
48 build->impl = impl;
49 build->shader = impl->function->shader;
50 }
51
52 static inline void
53 nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
54 gl_shader_stage stage,
55 const nir_shader_compiler_options *options)
56 {
57 build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
58 nir_function *func = nir_function_create(build->shader, "main");
59 func->is_entrypoint = true;
60 build->exact = false;
61 build->impl = nir_function_impl_create(func);
62 build->cursor = nir_after_cf_list(&build->impl->body);
63 }
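/* A sketch of typical usage (illustrative, not part of this header):
 * create a standalone shader, emit an immediate, and let the cursor
 * advance automatically.  `options` is assumed to be supplied by the
 * driver:
 *
 *    nir_builder b;
 *    nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
 *    nir_ssa_def *half = nir_imm_float(&b, 0.5f);
 */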
64
65 static inline void
66 nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
67 {
68 nir_instr_insert(build->cursor, instr);
69
70 /* Move the cursor forward. */
71 build->cursor = nir_after_instr(instr);
72 }
73
74 static inline nir_instr *
75 nir_builder_last_instr(nir_builder *build)
76 {
77 assert(build->cursor.option == nir_cursor_after_instr);
78 return build->cursor.instr;
79 }
80
81 static inline void
82 nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
83 {
84 nir_cf_node_insert(build->cursor, cf);
85 }
86
87 static inline bool
88 nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
89 {
90 nir_block *block = nir_cursor_current_block(build->cursor);
91 for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
92 if (n == cf_node)
93 return true;
94 }
95 return false;
96 }
97
98 static inline nir_if *
99 nir_push_if_src(nir_builder *build, nir_src condition)
100 {
101 nir_if *nif = nir_if_create(build->shader);
102 nif->condition = condition;
103 nir_builder_cf_insert(build, &nif->cf_node);
104 build->cursor = nir_before_cf_list(&nif->then_list);
105 return nif;
106 }
107
108 static inline nir_if *
109 nir_push_if(nir_builder *build, nir_ssa_def *condition)
110 {
111 return nir_push_if_src(build, nir_src_for_ssa(condition));
112 }
113
114 static inline nir_if *
115 nir_push_else(nir_builder *build, nir_if *nif)
116 {
117 if (nif) {
118 assert(nir_builder_is_inside_cf(build, &nif->cf_node));
119 } else {
120 nir_block *block = nir_cursor_current_block(build->cursor);
121 nif = nir_cf_node_as_if(block->cf_node.parent);
122 }
123 build->cursor = nir_before_cf_list(&nif->else_list);
124 return nif;
125 }
126
127 static inline void
128 nir_pop_if(nir_builder *build, nir_if *nif)
129 {
130 if (nif) {
131 assert(nir_builder_is_inside_cf(build, &nif->cf_node));
132 } else {
133 nir_block *block = nir_cursor_current_block(build->cursor);
134 nif = nir_cf_node_as_if(block->cf_node.parent);
135 }
136 build->cursor = nir_after_cf_node(&nif->cf_node);
137 }
138
139 static inline nir_ssa_def *
140 nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
141 {
142 nir_block *block = nir_cursor_current_block(build->cursor);
143 nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));
144
145 nir_phi_instr *phi = nir_phi_instr_create(build->shader);
146
147 nir_phi_src *src = ralloc(phi, nir_phi_src);
148 src->pred = nir_if_last_then_block(nif);
149 src->src = nir_src_for_ssa(then_def);
150 exec_list_push_tail(&phi->srcs, &src->node);
151
152 src = ralloc(phi, nir_phi_src);
153 src->pred = nir_if_last_else_block(nif);
154 src->src = nir_src_for_ssa(else_def);
155 exec_list_push_tail(&phi->srcs, &src->node);
156
157 assert(then_def->num_components == else_def->num_components);
158 assert(then_def->bit_size == else_def->bit_size);
159 nir_ssa_dest_init(&phi->instr, &phi->dest,
160 then_def->num_components, then_def->bit_size, NULL);
161
162 nir_builder_instr_insert(build, &phi->instr);
163
164 return &phi->dest.ssa;
165 }
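/* A sketch of the push/pop if-building pattern (illustrative; `b`, `x`,
 * and `y` are assumed to exist).  nir_if_phi() must be called right
 * after nir_pop_if() so the cursor sits just after the if:
 *
 *    nir_push_if(b, nir_flt(b, x, y));
 *    nir_ssa_def *t = nir_imm_float(b, 1.0f);
 *    nir_push_else(b, NULL);
 *    nir_ssa_def *e = nir_imm_float(b, 0.0f);
 *    nir_pop_if(b, NULL);
 *    nir_ssa_def *r = nir_if_phi(b, t, e);
 */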
166
167 static inline nir_loop *
168 nir_push_loop(nir_builder *build)
169 {
170 nir_loop *loop = nir_loop_create(build->shader);
171 nir_builder_cf_insert(build, &loop->cf_node);
172 build->cursor = nir_before_cf_list(&loop->body);
173 return loop;
174 }
175
176 static inline void
177 nir_pop_loop(nir_builder *build, nir_loop *loop)
178 {
179 if (loop) {
180 assert(nir_builder_is_inside_cf(build, &loop->cf_node));
181 } else {
182 nir_block *block = nir_cursor_current_block(build->cursor);
183 loop = nir_cf_node_as_loop(block->cf_node.parent);
184 }
185 build->cursor = nir_after_cf_node(&loop->cf_node);
186 }
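/* A sketch of loop construction (illustrative; `done` is assumed to be
 * computed in the loop body).  NIR loops have no implicit exit, so a
 * break must be emitted explicitly, e.g. with nir_jump() defined later
 * in this header:
 *
 *    nir_loop *loop = nir_push_loop(b);
 *    nir_push_if(b, done);
 *    nir_jump(b, nir_jump_break);
 *    nir_pop_if(b, NULL);
 *    ... loop body ...
 *    nir_pop_loop(b, loop);
 */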
187
188 static inline nir_ssa_def *
189 nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
190 {
191 nir_ssa_undef_instr *undef =
192 nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
193 if (!undef)
194 return NULL;
195
196 nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);
197
198 return &undef->def;
199 }
200
201 static inline nir_ssa_def *
202 nir_build_imm(nir_builder *build, unsigned num_components,
203 unsigned bit_size, const nir_const_value *value)
204 {
205 nir_load_const_instr *load_const =
206 nir_load_const_instr_create(build->shader, num_components, bit_size);
207 if (!load_const)
208 return NULL;
209
210 memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);
211
212 nir_builder_instr_insert(build, &load_const->instr);
213
214 return &load_const->def;
215 }
216
217 static inline nir_ssa_def *
218 nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
219 {
220 nir_load_const_instr *load_const =
221 nir_load_const_instr_create(build->shader, num_components, bit_size);
222
223 /* nir_load_const_instr_create uses rzalloc so it's already zero */
224
225 nir_builder_instr_insert(build, &load_const->instr);
226
227 return &load_const->def;
228 }
229
230 static inline nir_ssa_def *
231 nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
232 {
233 nir_const_value v = nir_const_value_for_bool(x, bit_size);
234 return nir_build_imm(build, 1, bit_size, &v);
235 }
236
237 static inline nir_ssa_def *
238 nir_imm_bool(nir_builder *build, bool x)
239 {
240 return nir_imm_boolN_t(build, x, 1);
241 }
242
243 static inline nir_ssa_def *
244 nir_imm_true(nir_builder *build)
245 {
246 return nir_imm_bool(build, true);
247 }
248
249 static inline nir_ssa_def *
250 nir_imm_false(nir_builder *build)
251 {
252 return nir_imm_bool(build, false);
253 }
254
255 static inline nir_ssa_def *
256 nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
257 {
258 nir_const_value v = nir_const_value_for_float(x, bit_size);
259 return nir_build_imm(build, 1, bit_size, &v);
260 }
261
262 static inline nir_ssa_def *
263 nir_imm_float16(nir_builder *build, float x)
264 {
265 return nir_imm_floatN_t(build, x, 16);
266 }
267
268 static inline nir_ssa_def *
269 nir_imm_float(nir_builder *build, float x)
270 {
271 return nir_imm_floatN_t(build, x, 32);
272 }
273
274 static inline nir_ssa_def *
275 nir_imm_double(nir_builder *build, double x)
276 {
277 return nir_imm_floatN_t(build, x, 64);
278 }
279
280 static inline nir_ssa_def *
281 nir_imm_vec2(nir_builder *build, float x, float y)
282 {
283 nir_const_value v[2] = {
284 nir_const_value_for_float(x, 32),
285 nir_const_value_for_float(y, 32),
286 };
287 return nir_build_imm(build, 2, 32, v);
288 }
289
290 static inline nir_ssa_def *
291 nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
292 {
293 nir_const_value v[4] = {
294 nir_const_value_for_float(x, 32),
295 nir_const_value_for_float(y, 32),
296 nir_const_value_for_float(z, 32),
297 nir_const_value_for_float(w, 32),
298 };
299
300 return nir_build_imm(build, 4, 32, v);
301 }
302
303 static inline nir_ssa_def *
304 nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
305 {
306 nir_const_value v[4] = {
307 nir_const_value_for_float(x, 16),
308 nir_const_value_for_float(y, 16),
309 nir_const_value_for_float(z, 16),
310 nir_const_value_for_float(w, 16),
311 };
312
313 return nir_build_imm(build, 4, 16, v);
314 }
315
316 static inline nir_ssa_def *
317 nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
318 {
319 nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
320 return nir_build_imm(build, 1, bit_size, &v);
321 }
322
323 static inline nir_ssa_def *
324 nir_imm_int(nir_builder *build, int x)
325 {
326 return nir_imm_intN_t(build, x, 32);
327 }
328
329 static inline nir_ssa_def *
330 nir_imm_int64(nir_builder *build, int64_t x)
331 {
332 return nir_imm_intN_t(build, x, 64);
333 }
334
335 static inline nir_ssa_def *
336 nir_imm_ivec2(nir_builder *build, int x, int y)
337 {
338 nir_const_value v[2] = {
339 nir_const_value_for_int(x, 32),
340 nir_const_value_for_int(y, 32),
341 };
342
343 return nir_build_imm(build, 2, 32, v);
344 }
345
346 static inline nir_ssa_def *
347 nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
348 {
349 nir_const_value v[4] = {
350 nir_const_value_for_int(x, 32),
351 nir_const_value_for_int(y, 32),
352 nir_const_value_for_int(z, 32),
353 nir_const_value_for_int(w, 32),
354 };
355
356 return nir_build_imm(build, 4, 32, v);
357 }
358
359 static inline nir_ssa_def *
360 nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
361 {
362 const nir_op_info *op_info = &nir_op_infos[instr->op];
363
364 instr->exact = build->exact;
365
366 /* Guess the number of components the destination temporary should have
367 * based on our input sizes, if it's not fixed for the op.
368 */
369 unsigned num_components = op_info->output_size;
370 if (num_components == 0) {
371 for (unsigned i = 0; i < op_info->num_inputs; i++) {
372 if (op_info->input_sizes[i] == 0)
373 num_components = MAX2(num_components,
374 instr->src[i].src.ssa->num_components);
375 }
376 }
377 assert(num_components != 0);
378
379 /* Figure out the bitwidth based on the source bitwidth if the instruction
380 * is variable-width.
381 */
382 unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
383 if (bit_size == 0) {
384 for (unsigned i = 0; i < op_info->num_inputs; i++) {
385 unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
386 if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
387 if (bit_size)
388 assert(src_bit_size == bit_size);
389 else
390 bit_size = src_bit_size;
391 } else {
392 assert(src_bit_size ==
393 nir_alu_type_get_type_size(op_info->input_types[i]));
394 }
395 }
396 }
397
398 /* When in doubt, assume 32. */
399 if (bit_size == 0)
400 bit_size = 32;
401
402 /* Make sure we don't swizzle from outside of our source vector (like if a
403 * scalar value was passed into a multiply with a vector).
404 */
405 for (unsigned i = 0; i < op_info->num_inputs; i++) {
406 for (unsigned j = instr->src[i].src.ssa->num_components;
407 j < NIR_MAX_VEC_COMPONENTS; j++) {
408 instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
409 }
410 }
411
412 nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
413 bit_size, NULL);
414 instr->dest.write_mask = (1 << num_components) - 1;
415
416 nir_builder_instr_insert(build, &instr->instr);
417
418 return &instr->dest.dest.ssa;
419 }
420
421 static inline nir_ssa_def *
422 nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
423 nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
424 {
425 nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
426 if (!instr)
427 return NULL;
428
429 instr->src[0].src = nir_src_for_ssa(src0);
430 if (src1)
431 instr->src[1].src = nir_src_for_ssa(src1);
432 if (src2)
433 instr->src[2].src = nir_src_for_ssa(src2);
434 if (src3)
435 instr->src[3].src = nir_src_for_ssa(src3);
436
437 return nir_builder_alu_instr_finish_and_insert(build, instr);
438 }
439
440 /* For the few special cases with more than 4 src args: */
441 static inline nir_ssa_def *
442 nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
443 {
444 const nir_op_info *op_info = &nir_op_infos[op];
445 nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
446 if (!instr)
447 return NULL;
448
449 for (unsigned i = 0; i < op_info->num_inputs; i++)
450 instr->src[i].src = nir_src_for_ssa(srcs[i]);
451
452 return nir_builder_alu_instr_finish_and_insert(build, instr);
453 }
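/* A sketch of using nir_build_alu() directly (illustrative); the
 * generated per-opcode helpers included below are usually preferred.
 * Both lines build the same instruction:
 *
 *    nir_ssa_def *sum  = nir_build_alu(b, nir_op_fadd, x, y, NULL, NULL);
 *    nir_ssa_def *sum2 = nir_fadd(b, x, y);
 */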
454
455 #include "nir_builder_opcodes.h"
456
457 static inline nir_ssa_def *
458 nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
459 {
460 return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
461 }
462
463 static inline nir_ssa_def *
464 nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
465 {
466 assert(!src.abs && !src.negate);
467 if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
468 bool any_swizzles = false;
469 for (unsigned i = 0; i < num_components; i++) {
470 if (src.swizzle[i] != i)
471 any_swizzles = true;
472 }
473 if (!any_swizzles)
474 return src.src.ssa;
475 }
476
477 nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
478 nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
479 nir_src_bit_size(src.src), NULL);
480 mov->exact = build->exact;
481 mov->dest.write_mask = (1 << num_components) - 1;
482 mov->src[0] = src;
483 nir_builder_instr_insert(build, &mov->instr);
484
485 return &mov->dest.dest.ssa;
486 }
487
488 /**
489 * Construct a mov that reswizzles the source's components.
490 */
491 static inline nir_ssa_def *
492 nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
493 unsigned num_components)
494 {
495 assert(num_components <= NIR_MAX_VEC_COMPONENTS);
496 nir_alu_src alu_src = { NIR_SRC_INIT };
497 alu_src.src = nir_src_for_ssa(src);
498
499 bool is_identity_swizzle = true;
500 for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
501 if (swiz[i] != i)
502 is_identity_swizzle = false;
503 alu_src.swizzle[i] = swiz[i];
504 }
505
506 if (num_components == src->num_components && is_identity_swizzle)
507 return src;
508
509 return nir_mov_alu(build, alu_src, num_components);
510 }
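/* A sketch of a reswizzle (illustrative): select components z, y, x of
 * `vec` as a new vec3.  An identity swizzle of the full vector returns
 * the source unmodified:
 *
 *    static const unsigned zyx[] = { 2, 1, 0 };
 *    nir_ssa_def *swizzled = nir_swizzle(b, vec, zyx, 3);
 */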
511
512 /* Selects the right fdot given the number of components in each source. */
513 static inline nir_ssa_def *
514 nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
515 {
516 assert(src0->num_components == src1->num_components);
517 switch (src0->num_components) {
518 case 1: return nir_fmul(build, src0, src1);
519 case 2: return nir_fdot2(build, src0, src1);
520 case 3: return nir_fdot3(build, src0, src1);
521 case 4: return nir_fdot4(build, src0, src1);
522 case 8: return nir_fdot8(build, src0, src1);
523 case 16: return nir_fdot16(build, src0, src1);
524 default:
525 unreachable("bad component size");
526 }
527
528 return NULL;
529 }
530
531 static inline nir_ssa_def *
532 nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
533 {
534 switch (src0->num_components) {
535 case 1: return nir_ieq(b, src0, src1);
536 case 2: return nir_ball_iequal2(b, src0, src1);
537 case 3: return nir_ball_iequal3(b, src0, src1);
538 case 4: return nir_ball_iequal4(b, src0, src1);
539 case 8: return nir_ball_iequal8(b, src0, src1);
540 case 16: return nir_ball_iequal16(b, src0, src1);
541 default:
542 unreachable("bad component size");
543 }
544 }
545
546 static inline nir_ssa_def *
547 nir_ball(nir_builder *b, nir_ssa_def *src)
548 {
549 return nir_ball_iequal(b, src, nir_imm_true(b));
550 }
551
552 static inline nir_ssa_def *
553 nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
554 {
555 switch (src0->num_components) {
556 case 1: return nir_ine(b, src0, src1);
557 case 2: return nir_bany_inequal2(b, src0, src1);
558 case 3: return nir_bany_inequal3(b, src0, src1);
559 case 4: return nir_bany_inequal4(b, src0, src1);
560 case 8: return nir_bany_inequal8(b, src0, src1);
561 case 16: return nir_bany_inequal16(b, src0, src1);
562 default:
563 unreachable("bad component size");
564 }
565 }
566
567 static inline nir_ssa_def *
568 nir_bany(nir_builder *b, nir_ssa_def *src)
569 {
570 return nir_bany_inequal(b, src, nir_imm_false(b));
571 }
572
573 static inline nir_ssa_def *
574 nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
575 {
576 return nir_swizzle(b, def, &c, 1);
577 }
578
579 static inline nir_ssa_def *
580 nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
581 {
582 unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
583
584 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
585 if ((mask & (1 << i)) == 0)
586 continue;
587 swizzle[num_channels++] = i;
588 }
589
590 return nir_swizzle(b, def, swizzle, num_channels);
591 }
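/* A sketch using a component mask (illustrative): mask 0x5 selects
 * components 0 and 2 of `vec`, yielding a vec2:
 *
 *    nir_ssa_def *xz = nir_channels(b, vec, 0x5);
 */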
592
593 static inline nir_ssa_def *
594 _nir_vector_extract_helper(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c,
595 unsigned start, unsigned end)
596 {
597 if (start == end - 1) {
598 return nir_channel(b, vec, start);
599 } else {
600 unsigned mid = start + (end - start) / 2;
601 return nir_bcsel(b, nir_ilt(b, c, nir_imm_intN_t(b, mid, c->bit_size)),
602 _nir_vector_extract_helper(b, vec, c, start, mid),
603 _nir_vector_extract_helper(b, vec, c, mid, end));
604 }
605 }
606
607 static inline nir_ssa_def *
608 nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
609 {
610 nir_src c_src = nir_src_for_ssa(c);
611 if (nir_src_is_const(c_src)) {
612 uint64_t c_const = nir_src_as_uint(c_src);
613 if (c_const < vec->num_components)
614 return nir_channel(b, vec, c_const);
615 else
616 return nir_ssa_undef(b, 1, vec->bit_size);
617 } else {
618 return _nir_vector_extract_helper(b, vec, c, 0, vec->num_components);
619 }
620 }
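/* Illustrative note: with a non-constant index, nir_vector_extract()
 * lowers to a binary search of nir_bcsel instructions, e.g. for a vec4:
 *
 *    bcsel(idx < 2, bcsel(idx < 1, vec.x, vec.y),
 *                   bcsel(idx < 3, vec.z, vec.w))
 */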
621
622 /** Replaces the component of `vec` specified by `c` with `scalar` */
623 static inline nir_ssa_def *
624 nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
625 nir_ssa_def *scalar, unsigned c)
626 {
627 assert(scalar->num_components == 1);
628 assert(c < vec->num_components);
629
630 nir_op vec_op = nir_op_vec(vec->num_components);
631 nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);
632
633 for (unsigned i = 0; i < vec->num_components; i++) {
634 if (i == c) {
635 vec_instr->src[i].src = nir_src_for_ssa(scalar);
636 vec_instr->src[i].swizzle[0] = 0;
637 } else {
638 vec_instr->src[i].src = nir_src_for_ssa(vec);
639 vec_instr->src[i].swizzle[0] = i;
640 }
641 }
642
643 return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
644 }
645
646 /** Replaces the component of `vec` specified by `c` with `scalar` */
647 static inline nir_ssa_def *
648 nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
649 nir_ssa_def *c)
650 {
651 assert(scalar->num_components == 1);
652 assert(c->num_components == 1);
653
654 nir_src c_src = nir_src_for_ssa(c);
655 if (nir_src_is_const(c_src)) {
656 uint64_t c_const = nir_src_as_uint(c_src);
657 if (c_const < vec->num_components)
658 return nir_vector_insert_imm(b, vec, scalar, c_const);
659 else
660 return vec;
661 } else {
662 nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
663 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
664 per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
665 nir_ssa_def *per_comp_idx =
666 nir_build_imm(b, vec->num_components,
667 c->bit_size, per_comp_idx_const);
668
669 /* nir_builder will automatically splat out scalars to vectors so an
670 * insert is as simple as "if I'm the channel, replace me with the
671 * scalar."
672 */
673 return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
674 }
675 }
676
677 static inline nir_ssa_def *
678 nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
679 {
680 if (x->bit_size == dest_bit_size)
681 return x;
682
683 switch (dest_bit_size) {
684 case 64: return nir_i2i64(build, x);
685 case 32: return nir_i2i32(build, x);
686 case 16: return nir_i2i16(build, x);
687 case 8: return nir_i2i8(build, x);
688 default: unreachable("Invalid bit size");
689 }
690 }
691
692 static inline nir_ssa_def *
693 nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
694 {
695 if (x->bit_size == dest_bit_size)
696 return x;
697
698 switch (dest_bit_size) {
699 case 64: return nir_u2u64(build, x);
700 case 32: return nir_u2u32(build, x);
701 case 16: return nir_u2u16(build, x);
702 case 8: return nir_u2u8(build, x);
703 default: unreachable("Invalid bit size");
704 }
705 }
706
707 static inline nir_ssa_def *
708 nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
709 {
710 assert(x->bit_size <= 64);
711 if (x->bit_size < 64)
712 y &= (1ull << x->bit_size) - 1;
713
714 if (y == 0) {
715 return x;
716 } else {
717 return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
718 }
719 }
720
721 static inline nir_ssa_def *
722 _nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
723 {
724 assert(x->bit_size <= 64);
725 if (x->bit_size < 64)
726 y &= (1ull << x->bit_size) - 1;
727
728 if (y == 0) {
729 return nir_imm_intN_t(build, 0, x->bit_size);
730 } else if (y == 1) {
731 return x;
732 } else if (!build->shader->options->lower_bitops &&
733 util_is_power_of_two_or_zero64(y)) {
734 return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
735 } else if (amul) {
736 return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
737 } else {
738 return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
739 }
740 }
741
742 static inline nir_ssa_def *
743 nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
744 {
745 return _nir_mul_imm(build, x, y, false);
746 }
747
748 static inline nir_ssa_def *
749 nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
750 {
751 return _nir_mul_imm(build, x, y, true);
752 }
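/* A sketch of the strength reduction above (illustrative): with
 * lower_bitops unset, a power-of-two multiply becomes a shift:
 *
 *    nir_imul_imm(b, x, 8);   becomes   ishl(x, 3)
 *    nir_imul_imm(b, x, 1);   returns x unchanged
 */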
753
754 static inline nir_ssa_def *
755 nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
756 {
757 return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
758 }
759
760 static inline nir_ssa_def *
761 nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
762 {
763 return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
764 }
765
766 static inline nir_ssa_def *
767 nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
768 {
769 y &= BITFIELD64_MASK(x->bit_size);
770
771 if (y == 0) {
772 return nir_imm_intN_t(build, 0, x->bit_size);
773 } else if (y == BITFIELD64_MASK(x->bit_size)) {
774 return x;
775 } else {
776 return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
777 }
778 }
779
780 static inline nir_ssa_def *
781 nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
782 {
783 assert(src->num_components * src->bit_size == dest_bit_size);
784
785 switch (dest_bit_size) {
786 case 64:
787 switch (src->bit_size) {
788 case 32: return nir_pack_64_2x32(b, src);
789 case 16: return nir_pack_64_4x16(b, src);
790 default: break;
791 }
792 break;
793
794 case 32:
795 if (src->bit_size == 16)
796 return nir_pack_32_2x16(b, src);
797 break;
798
799 default:
800 break;
801 }
802
803    /* If we got here, we have no dedicated pack opcode. */
804 nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
805 for (unsigned i = 0; i < src->num_components; i++) {
806 nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
807 val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
808 dest = nir_ior(b, dest, val);
809 }
810 return dest;
811 }
812
813 static inline nir_ssa_def *
814 nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
815 {
816 assert(src->num_components == 1);
817 assert(src->bit_size > dest_bit_size);
818 const unsigned dest_num_components = src->bit_size / dest_bit_size;
819 assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);
820
821 switch (src->bit_size) {
822 case 64:
823 switch (dest_bit_size) {
824 case 32: return nir_unpack_64_2x32(b, src);
825 case 16: return nir_unpack_64_4x16(b, src);
826 default: break;
827 }
828 break;
829
830 case 32:
831 if (dest_bit_size == 16)
832 return nir_unpack_32_2x16(b, src);
833 break;
834
835 default:
836 break;
837 }
838
839 /* If we got here, we have no dedicated unpack opcode. */
840 nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
841 for (unsigned i = 0; i < dest_num_components; i++) {
842 nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
843 dest_comps[i] = nir_u2u(b, val, dest_bit_size);
844 }
845 return nir_vec(b, dest_comps, dest_num_components);
846 }
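/* A sketch of the pack/unpack round trip (illustrative, assuming a
 * 2-component 32-bit `v2`):
 *
 *    nir_ssa_def *u64 = nir_pack_bits(b, v2, 64);     emits pack_64_2x32
 *    nir_ssa_def *v2b = nir_unpack_bits(b, u64, 32);  emits unpack_64_2x32
 */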
847
848 /**
849 * Treats srcs as if they were one big blob of bits and extracts the range of bits
850 * given by
851 *
852 * [first_bit, first_bit + dest_num_components * dest_bit_size)
853 *
854 * The range can have any alignment or size as long as it's an integer number
855 * of destination components and fits inside the concatenated sources.
856 *
857 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
858 * values are involved because that would require pack/unpack to/from a vec8
859 * which NIR currently does not support.
860 */
861 static inline nir_ssa_def *
862 nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
863 unsigned first_bit,
864 unsigned dest_num_components, unsigned dest_bit_size)
865 {
866 const unsigned num_bits = dest_num_components * dest_bit_size;
867
868 /* Figure out the common bit size */
869 unsigned common_bit_size = dest_bit_size;
870 for (unsigned i = 0; i < num_srcs; i++)
871 common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
872 if (first_bit > 0)
873 common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));
874
875 /* We don't want to have to deal with 1-bit values */
876 assert(common_bit_size >= 8);
877
878 nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
879 assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));
880
881 /* First, unpack to the common bit size and select the components from the
882 * source.
883 */
884 int src_idx = -1;
885 unsigned src_start_bit = 0;
886 unsigned src_end_bit = 0;
887 for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
888 const unsigned bit = first_bit + (i * common_bit_size);
889 while (bit >= src_end_bit) {
890 src_idx++;
891 assert(src_idx < (int) num_srcs);
892 src_start_bit = src_end_bit;
893 src_end_bit += srcs[src_idx]->bit_size *
894 srcs[src_idx]->num_components;
895 }
896 assert(bit >= src_start_bit);
897 assert(bit + common_bit_size <= src_end_bit);
898 const unsigned rel_bit = bit - src_start_bit;
899 const unsigned src_bit_size = srcs[src_idx]->bit_size;
900
901 nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
902 rel_bit / src_bit_size);
903 if (srcs[src_idx]->bit_size > common_bit_size) {
904 nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
905 comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
906 common_bit_size);
907 }
908 common_comps[i] = comp;
909 }
910
911 /* Now, re-pack the destination if we have to */
912 if (dest_bit_size > common_bit_size) {
913 unsigned common_per_dest = dest_bit_size / common_bit_size;
914 nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
915 for (unsigned i = 0; i < dest_num_components; i++) {
916 nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
917 common_per_dest);
918 dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
919 }
920 return nir_vec(b, dest_comps, dest_num_components);
921 } else {
922 assert(dest_bit_size == common_bit_size);
923 return nir_vec(b, common_comps, dest_num_components);
924 }
925 }
926
927 static inline nir_ssa_def *
928 nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
929 {
930 assert((src->bit_size * src->num_components) % dest_bit_size == 0);
931 const unsigned dest_num_components =
932 (src->bit_size * src->num_components) / dest_bit_size;
933 assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);
934
935 return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
936 }
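/* A sketch (illustrative): reinterpreting a 32-bit vec4 as a 64-bit
 * vec2 without changing any bits:
 *
 *    nir_ssa_def *v2_64 = nir_bitcast_vector(b, v4_32, 64);
 */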
937
938 /**
939 * Turns a nir_src into a nir_ssa_def * so it can be passed to
940 * nir_build_alu()-based builder calls.
941 *
942 * See nir_ssa_for_alu_src() for alu instructions.
943 */
944 static inline nir_ssa_def *
945 nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
946 {
947 if (src.is_ssa && src.ssa->num_components == num_components)
948 return src.ssa;
949
950 nir_alu_src alu = { NIR_SRC_INIT };
951 alu.src = src;
952    for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
953 alu.swizzle[j] = j;
954
955 return nir_mov_alu(build, alu, num_components);
956 }
957
958 /**
959 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
960 * nir_alu_src's swizzle.
961 */
962 static inline nir_ssa_def *
963 nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
964 {
965 static uint8_t trivial_swizzle[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
966 STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);
967
968 nir_alu_src *src = &instr->src[srcn];
969 unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
970
971 if (src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
972 !src->abs && !src->negate &&
973 (memcmp(src->swizzle, trivial_swizzle, num_components) == 0))
974 return src->src.ssa;
975
976 return nir_mov_alu(build, *src, num_components);
977 }
978
979 static inline unsigned
980 nir_get_ptr_bitsize(nir_shader *shader)
981 {
982 if (shader->info.stage == MESA_SHADER_KERNEL)
983 return shader->info.cs.ptr_size;
984 return 32;
985 }
986
987 static inline nir_deref_instr *
988 nir_build_deref_var(nir_builder *build, nir_variable *var)
989 {
990 nir_deref_instr *deref =
991 nir_deref_instr_create(build->shader, nir_deref_type_var);
992
993 deref->mode = (nir_variable_mode)var->data.mode;
994 deref->type = var->type;
995 deref->var = var;
996
997 nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
998 nir_get_ptr_bitsize(build->shader), NULL);
999
1000 nir_builder_instr_insert(build, &deref->instr);
1001
1002 return deref;
1003 }
1004
1005 static inline nir_deref_instr *
1006 nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
1007 nir_ssa_def *index)
1008 {
1009 assert(glsl_type_is_array(parent->type) ||
1010 glsl_type_is_matrix(parent->type) ||
1011 glsl_type_is_vector(parent->type));
1012
1013 assert(index->bit_size == parent->dest.ssa.bit_size);
1014
1015 nir_deref_instr *deref =
1016 nir_deref_instr_create(build->shader, nir_deref_type_array);
1017
1018 deref->mode = parent->mode;
1019 deref->type = glsl_get_array_element(parent->type);
1020 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
1021 deref->arr.index = nir_src_for_ssa(index);
1022
1023 nir_ssa_dest_init(&deref->instr, &deref->dest,
1024 parent->dest.ssa.num_components,
1025 parent->dest.ssa.bit_size, NULL);
1026
1027 nir_builder_instr_insert(build, &deref->instr);
1028
1029 return deref;
1030 }
1031
1032 static inline nir_deref_instr *
1033 nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
1034 int64_t index)
1035 {
1036 assert(parent->dest.is_ssa);
1037 nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
1038 parent->dest.ssa.bit_size);
1039
1040 return nir_build_deref_array(build, parent, idx_ssa);
1041 }
1042
1043 static inline nir_deref_instr *
1044 nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
1045 nir_ssa_def *index)
1046 {
1047 assert(parent->deref_type == nir_deref_type_array ||
1048 parent->deref_type == nir_deref_type_ptr_as_array ||
1049 parent->deref_type == nir_deref_type_cast);
1050
1051 assert(index->bit_size == parent->dest.ssa.bit_size);
1052
1053 nir_deref_instr *deref =
1054 nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);
1055
1056 deref->mode = parent->mode;
1057 deref->type = parent->type;
1058 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
1059 deref->arr.index = nir_src_for_ssa(index);
1060
1061 nir_ssa_dest_init(&deref->instr, &deref->dest,
1062 parent->dest.ssa.num_components,
1063 parent->dest.ssa.bit_size, NULL);
1064
1065 nir_builder_instr_insert(build, &deref->instr);
1066
1067 return deref;
1068 }
1069
1070 static inline nir_deref_instr *
1071 nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
1072 {
1073 assert(glsl_type_is_array(parent->type) ||
1074 glsl_type_is_matrix(parent->type));
1075
1076 nir_deref_instr *deref =
1077 nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);
1078
1079 deref->mode = parent->mode;
1080 deref->type = glsl_get_array_element(parent->type);
1081 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
1082
1083 nir_ssa_dest_init(&deref->instr, &deref->dest,
1084 parent->dest.ssa.num_components,
1085 parent->dest.ssa.bit_size, NULL);
1086
1087 nir_builder_instr_insert(build, &deref->instr);
1088
1089 return deref;
1090 }
1091
1092 static inline nir_deref_instr *
1093 nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
1094 unsigned index)
1095 {
1096 assert(glsl_type_is_struct_or_ifc(parent->type));
1097
1098 nir_deref_instr *deref =
1099 nir_deref_instr_create(build->shader, nir_deref_type_struct);
1100
1101 deref->mode = parent->mode;
1102 deref->type = glsl_get_struct_field(parent->type, index);
1103 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
1104 deref->strct.index = index;
1105
1106 nir_ssa_dest_init(&deref->instr, &deref->dest,
1107 parent->dest.ssa.num_components,
1108 parent->dest.ssa.bit_size, NULL);
1109
1110 nir_builder_instr_insert(build, &deref->instr);
1111
1112 return deref;
1113 }
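/* A sketch of building a deref chain (illustrative; `var` is assumed to
 * be an array of structs).  Each step narrows the type, and the chain
 * can then be loaded with nir_load_deref() defined below:
 *
 *    nir_deref_instr *d = nir_build_deref_var(b, var);
 *    d = nir_build_deref_array_imm(b, d, 3);
 *    d = nir_build_deref_struct(b, d, 0);
 *    nir_ssa_def *val = nir_load_deref(b, d);
 */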
1114
1115 static inline nir_deref_instr *
1116 nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
1117 nir_variable_mode mode, const struct glsl_type *type,
1118 unsigned ptr_stride)
1119 {
1120 nir_deref_instr *deref =
1121 nir_deref_instr_create(build->shader, nir_deref_type_cast);
1122
1123 deref->mode = mode;
1124 deref->type = type;
1125 deref->parent = nir_src_for_ssa(parent);
1126 deref->cast.ptr_stride = ptr_stride;
1127
1128 nir_ssa_dest_init(&deref->instr, &deref->dest,
1129 parent->num_components, parent->bit_size, NULL);
1130
1131 nir_builder_instr_insert(build, &deref->instr);
1132
1133 return deref;
1134 }
1135
1136 /** Returns a deref that follows another but starting from the given parent
1137 *
1138 * The new deref will have the same type and take the same array or struct index
1139 * as the leader deref but it may have a different parent. This is very
1140 * useful for walking deref paths.
1141 */
1142 static inline nir_deref_instr *
1143 nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
1144 nir_deref_instr *leader)
1145 {
1146 /* If the derefs would have the same parent, don't make a new one */
1147 assert(leader->parent.is_ssa);
1148 if (leader->parent.ssa == &parent->dest.ssa)
1149 return leader;
1150
1151 UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);
1152
1153 switch (leader->deref_type) {
1154 case nir_deref_type_var:
1155 unreachable("A var dereference cannot have a parent");
1156 break;
1157
1158 case nir_deref_type_array:
1159 case nir_deref_type_array_wildcard:
1160 assert(glsl_type_is_matrix(parent->type) ||
1161 glsl_type_is_array(parent->type) ||
1162 (leader->deref_type == nir_deref_type_array &&
1163 glsl_type_is_vector(parent->type)));
1164 assert(glsl_get_length(parent->type) ==
1165 glsl_get_length(leader_parent->type));
1166
1167 if (leader->deref_type == nir_deref_type_array) {
1168 assert(leader->arr.index.is_ssa);
1169 nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
1170 parent->dest.ssa.bit_size);
1171 return nir_build_deref_array(b, parent, index);
1172 } else {
1173 return nir_build_deref_array_wildcard(b, parent);
1174 }
1175
1176 case nir_deref_type_struct:
1177 assert(glsl_type_is_struct_or_ifc(parent->type));
1178 assert(glsl_get_length(parent->type) ==
1179 glsl_get_length(leader_parent->type));
1180
1181 return nir_build_deref_struct(b, parent, leader->strct.index);
1182
1183 default:
1184 unreachable("Invalid deref instruction type");
1185 }
1186 }
1187
1188 static inline nir_ssa_def *
1189 nir_load_reg(nir_builder *build, nir_register *reg)
1190 {
1191 return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
1192 }
1193
1194 static inline void
1195 nir_store_reg(nir_builder *build, nir_register *reg,
1196 nir_ssa_def *def, nir_component_mask_t write_mask)
1197 {
1198 assert(reg->num_components == def->num_components);
1199 assert(reg->bit_size == def->bit_size);
1200
1201 nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
1202 mov->src[0].src = nir_src_for_ssa(def);
1203 mov->dest.dest = nir_dest_for_reg(reg);
1204 mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
1205 nir_builder_instr_insert(build, &mov->instr);
1206 }
1207
1208 static inline nir_ssa_def *
1209 nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
1210 enum gl_access_qualifier access)
1211 {
1212 nir_intrinsic_instr *load =
1213 nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
1214 load->num_components = glsl_get_vector_elements(deref->type);
1215 load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
1216 nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
1217 glsl_get_bit_size(deref->type), NULL);
1218 nir_intrinsic_set_access(load, access);
1219 nir_builder_instr_insert(build, &load->instr);
1220 return &load->dest.ssa;
1221 }
1222
1223 static inline nir_ssa_def *
1224 nir_load_deref(nir_builder *build, nir_deref_instr *deref)
1225 {
1226 return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
1227 }
1228
1229 static inline void
1230 nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
1231 nir_ssa_def *value, unsigned writemask,
1232 enum gl_access_qualifier access)
1233 {
1234 nir_intrinsic_instr *store =
1235 nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
1236 store->num_components = glsl_get_vector_elements(deref->type);
1237 store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
1238 store->src[1] = nir_src_for_ssa(value);
1239 nir_intrinsic_set_write_mask(store,
1240 writemask & ((1 << store->num_components) - 1));
1241 nir_intrinsic_set_access(store, access);
1242 nir_builder_instr_insert(build, &store->instr);
1243 }
1244
1245 static inline void
1246 nir_store_deref(nir_builder *build, nir_deref_instr *deref,
1247 nir_ssa_def *value, unsigned writemask)
1248 {
1249 nir_store_deref_with_access(build, deref, value, writemask,
1250 (enum gl_access_qualifier)0);
1251 }
1252
1253 static inline void
1254 nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
1255 nir_deref_instr *src,
1256 enum gl_access_qualifier dest_access,
1257 enum gl_access_qualifier src_access)
1258 {
1259 nir_intrinsic_instr *copy =
1260 nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
1261 copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
1262 copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
1263 nir_intrinsic_set_dst_access(copy, dest_access);
1264 nir_intrinsic_set_src_access(copy, src_access);
1265 nir_builder_instr_insert(build, &copy->instr);
1266 }
1267
1268 static inline void
1269 nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
1270 {
1271 nir_copy_deref_with_access(build, dest, src,
1272 (enum gl_access_qualifier) 0,
1273 (enum gl_access_qualifier) 0);
1274 }
1275
1276 static inline nir_ssa_def *
1277 nir_load_var(nir_builder *build, nir_variable *var)
1278 {
1279 return nir_load_deref(build, nir_build_deref_var(build, var));
1280 }
1281
1282 static inline void
1283 nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
1284 unsigned writemask)
1285 {
1286 nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
1287 }
1288
1289 static inline void
1290 nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
1291 {
1292 nir_copy_deref(build, nir_build_deref_var(build, dest),
1293 nir_build_deref_var(build, src));
1294 }
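/* A sketch of the variable convenience helpers (illustrative; `in_var`
 * and `out_var` are assumed to be vec4 variables):
 *
 *    nir_ssa_def *v = nir_load_var(b, in_var);
 *    nir_store_var(b, out_var, v, 0xf);
 */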
1295
1296 static inline nir_ssa_def *
1297 nir_load_param(nir_builder *build, uint32_t param_idx)
1298 {
1299 assert(param_idx < build->impl->function->num_params);
1300 nir_parameter *param = &build->impl->function->params[param_idx];
1301
1302 nir_intrinsic_instr *load =
1303 nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
1304 nir_intrinsic_set_param_idx(load, param_idx);
1305 load->num_components = param->num_components;
1306 nir_ssa_dest_init(&load->instr, &load->dest,
1307 param->num_components, param->bit_size, NULL);
1308 nir_builder_instr_insert(build, &load->instr);
1309 return &load->dest.ssa;
1310 }
1311
1312 #include "nir_builder_opcodes.h"
1313
1314 static inline nir_ssa_def *
1315 nir_f2b(nir_builder *build, nir_ssa_def *f)
1316 {
1317 return nir_f2b1(build, f);
1318 }
1319
1320 static inline nir_ssa_def *
1321 nir_i2b(nir_builder *build, nir_ssa_def *i)
1322 {
1323 return nir_i2b1(build, i);
1324 }
1325
1326 static inline nir_ssa_def *
1327 nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
1328 {
1329 switch (bit_size) {
1330 case 64: return nir_b2f64(build, b);
1331 case 32: return nir_b2f32(build, b);
1332 case 16: return nir_b2f16(build, b);
1333 default:
1334 unreachable("Invalid bit-size");
1335    }
1336 }
1337
1338 static inline nir_ssa_def *
1339 nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
1340 {
1341 switch (bit_size) {
1342 case 64: return nir_b2i64(build, b);
1343 case 32: return nir_b2i32(build, b);
1344 case 16: return nir_b2i16(build, b);
1345 case 8: return nir_b2i8(build, b);
1346 default:
1347 unreachable("Invalid bit-size");
1348    }
1349 }
1350 static inline nir_ssa_def *
1351 nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
1352 unsigned interp_mode)
1353 {
1354 unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
1355 nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
1356 nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
1357 nir_intrinsic_set_interp_mode(bary, interp_mode);
1358 nir_builder_instr_insert(build, &bary->instr);
1359 return &bary->dest.ssa;
1360 }
1361
1362 static inline void
1363 nir_jump(nir_builder *build, nir_jump_type jump_type)
1364 {
1365 assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
1366 nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
1367 nir_builder_instr_insert(build, &jump->instr);
1368 }
1369
1370 static inline void
1371 nir_goto(nir_builder *build, struct nir_block *target)
1372 {
1373 assert(!build->impl->structured);
1374 nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
1375 jump->target = target;
1376 nir_builder_instr_insert(build, &jump->instr);
1377 }
1378
1379 static inline void
1380 nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
1381 struct nir_block *else_target)
1382 {
1383 assert(!build->impl->structured);
1384 nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
1385 jump->condition = cond;
1386 jump->target = target;
1387 jump->else_target = else_target;
1388 nir_builder_instr_insert(build, &jump->instr);
1389 }
1390
1391 static inline nir_ssa_def *
1392 nir_compare_func(nir_builder *b, enum compare_func func,
1393 nir_ssa_def *src0, nir_ssa_def *src1)
1394 {
1395 switch (func) {
1396 case COMPARE_FUNC_NEVER:
1397 return nir_imm_int(b, 0);
1398 case COMPARE_FUNC_ALWAYS:
1399 return nir_imm_int(b, ~0);
1400 case COMPARE_FUNC_EQUAL:
1401 return nir_feq(b, src0, src1);
1402 case COMPARE_FUNC_NOTEQUAL:
1403 return nir_fne(b, src0, src1);
1404 case COMPARE_FUNC_GREATER:
1405 return nir_flt(b, src1, src0);
1406 case COMPARE_FUNC_GEQUAL:
1407 return nir_fge(b, src0, src1);
1408 case COMPARE_FUNC_LESS:
1409 return nir_flt(b, src0, src1);
1410 case COMPARE_FUNC_LEQUAL:
1411 return nir_fge(b, src1, src0);
1412 }
1413 unreachable("bad compare func");
1414 }
1415
1416 static inline void
1417 nir_scoped_barrier(nir_builder *b,
1418 nir_scope exec_scope,
1419 nir_scope mem_scope,
1420 nir_memory_semantics mem_semantics,
1421 nir_variable_mode mem_modes)
1422 {
1423 nir_intrinsic_instr *intrin =
1424 nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
1425 nir_intrinsic_set_execution_scope(intrin, exec_scope);
1426 nir_intrinsic_set_memory_scope(intrin, mem_scope);
1427 nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
1428 nir_intrinsic_set_memory_modes(intrin, mem_modes);
1429 nir_builder_instr_insert(b, &intrin->instr);
1430 }
1431
1432 static inline void
1433 nir_scoped_memory_barrier(nir_builder *b,
1434 nir_scope scope,
1435 nir_memory_semantics semantics,
1436 nir_variable_mode modes)
1437 {
1438 nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
1439 }
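/* A sketch (illustrative): roughly the equivalent of a GLSL barrier()
 * plus a shared-memory fence in a compute shader:
 *
 *    nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
 *                       NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
 */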
1440
1441 static inline nir_ssa_def *
1442 nir_convert_to_bit_size(nir_builder *b,
1443 nir_ssa_def *src,
1444 nir_alu_type type,
1445 unsigned bit_size)
1446 {
1447 nir_alu_type base_type = nir_alu_type_get_base_type(type);
1448 nir_alu_type dst_type = (nir_alu_type)(bit_size | base_type);
1449
1450 nir_op opcode =
1451 nir_type_conversion_op(type, dst_type, nir_rounding_mode_undef);
1452
1453 return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
1454 }
1455
1456 static inline nir_ssa_def *
1457 nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
1458 {
1459 return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
1460 }
1461
1462 static inline nir_ssa_def *
1463 nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
1464 {
1465 return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
1466 }
1467
1468 static inline nir_ssa_def *
1469 nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
1470 {
1471 return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
1472 }
1473
1474 static inline nir_ssa_def *
1475 nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
1476 {
1477 return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
1478 }
1479
1480 #endif /* NIR_BUILDER_H */