1 /*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef NIR_BUILDER_H
25 #define NIR_BUILDER_H
26
27 #include "nir_control_flow.h"
28 #include "util/bitscan.h"
29 #include "util/half_float.h"
30
31 struct exec_list;
32
33 typedef struct nir_builder {
34 nir_cursor cursor;
35
36 /* Whether new ALU instructions will be marked "exact" */
37 bool exact;
38
39 nir_shader *shader;
40 nir_function_impl *impl;
41 } nir_builder;
42
43 static inline void
44 nir_builder_init(nir_builder *build, nir_function_impl *impl)
45 {
46 memset(build, 0, sizeof(*build));
47 build->exact = false;
48 build->impl = impl;
49 build->shader = impl->function->shader;
50 }
51
52 static inline void
53 nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
54 gl_shader_stage stage,
55 const nir_shader_compiler_options *options)
56 {
57 build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
58 nir_function *func = nir_function_create(build->shader, "main");
59 func->is_entrypoint = true;
60 build->exact = false;
61 build->impl = nir_function_impl_create(func);
62 build->cursor = nir_after_cf_list(&build->impl->body);
63 }
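/*
 * A typical way to drive nir_builder from an optimization or lowering pass
 * (a minimal sketch under assumed names; the pass structure and "impl" are
 * illustrative, not part of this header):
 *
 *    nir_builder b;
 *    nir_builder_init(&b, impl);
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          b.cursor = nir_before_instr(instr);
 *          ...emit replacement code with the helpers below...
 *       }
 *    }
 *
 * nir_builder_init() zeroes the structure, so the cursor must be pointed at a
 * block or instruction (nir_before_instr(), nir_after_cf_list(), etc.) before
 * anything is emitted.
 */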
64
65 static inline void
66 nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
67 {
68 nir_instr_insert(build->cursor, instr);
69
70 /* Move the cursor forward. */
71 build->cursor = nir_after_instr(instr);
72 }
73
74 static inline nir_instr *
75 nir_builder_last_instr(nir_builder *build)
76 {
77 assert(build->cursor.option == nir_cursor_after_instr);
78 return build->cursor.instr;
79 }
80
81 static inline void
82 nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
83 {
84 nir_cf_node_insert(build->cursor, cf);
85 }
86
87 static inline bool
88 nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
89 {
90 nir_block *block = nir_cursor_current_block(build->cursor);
91 for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
92 if (n == cf_node)
93 return true;
94 }
95 return false;
96 }
97
98 static inline nir_if *
99 nir_push_if(nir_builder *build, nir_ssa_def *condition)
100 {
101 nir_if *nif = nir_if_create(build->shader);
102 nif->condition = nir_src_for_ssa(condition);
103 nir_builder_cf_insert(build, &nif->cf_node);
104 build->cursor = nir_before_cf_list(&nif->then_list);
105 return nif;
106 }
107
108 static inline nir_if *
109 nir_push_else(nir_builder *build, nir_if *nif)
110 {
111 if (nif) {
112 assert(nir_builder_is_inside_cf(build, &nif->cf_node));
113 } else {
114 nir_block *block = nir_cursor_current_block(build->cursor);
115 nif = nir_cf_node_as_if(block->cf_node.parent);
116 }
117 build->cursor = nir_before_cf_list(&nif->else_list);
118 return nif;
119 }
120
121 static inline void
122 nir_pop_if(nir_builder *build, nir_if *nif)
123 {
124 if (nif) {
125 assert(nir_builder_is_inside_cf(build, &nif->cf_node));
126 } else {
127 nir_block *block = nir_cursor_current_block(build->cursor);
128 nif = nir_cf_node_as_if(block->cf_node.parent);
129 }
130 build->cursor = nir_after_cf_node(&nif->cf_node);
131 }
132
133 static inline nir_ssa_def *
134 nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
135 {
136 nir_block *block = nir_cursor_current_block(build->cursor);
137 nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));
138
139 nir_phi_instr *phi = nir_phi_instr_create(build->shader);
140
141 nir_phi_src *src = ralloc(phi, nir_phi_src);
142 src->pred = nir_if_last_then_block(nif);
143 src->src = nir_src_for_ssa(then_def);
144 exec_list_push_tail(&phi->srcs, &src->node);
145
146 src = ralloc(phi, nir_phi_src);
147 src->pred = nir_if_last_else_block(nif);
148 src->src = nir_src_for_ssa(else_def);
149 exec_list_push_tail(&phi->srcs, &src->node);
150
151 assert(then_def->num_components == else_def->num_components);
152 assert(then_def->bit_size == else_def->bit_size);
153 nir_ssa_dest_init(&phi->instr, &phi->dest,
154 then_def->num_components, then_def->bit_size, NULL);
155
156 nir_builder_instr_insert(build, &phi->instr);
157
158 return &phi->dest.ssa;
159 }
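/*
 * Usage sketch for the structured if/else helpers above (illustrative only;
 * "b" is a nir_builder pointer and "x" a previously built 32-bit float SSA
 * value):
 *
 *    nir_ssa_def *cond = nir_flt(b, x, nir_imm_float(b, 0.0f));
 *    nir_push_if(b, cond);
 *    nir_ssa_def *then_val = nir_fneg(b, x);
 *    nir_push_else(b, NULL);
 *    nir_ssa_def *else_val = x;
 *    nir_pop_if(b, NULL);
 *    nir_ssa_def *abs_x = nir_if_phi(b, then_val, else_val);
 *
 * Passing NULL to nir_push_else()/nir_pop_if() makes them recover the nir_if
 * from the current cursor position, and nir_if_phi() must be called with the
 * cursor immediately after the if, which is exactly where nir_pop_if() leaves
 * it.
 */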
160
161 static inline nir_loop *
162 nir_push_loop(nir_builder *build)
163 {
164 nir_loop *loop = nir_loop_create(build->shader);
165 nir_builder_cf_insert(build, &loop->cf_node);
166 build->cursor = nir_before_cf_list(&loop->body);
167 return loop;
168 }
169
170 static inline void
171 nir_pop_loop(nir_builder *build, nir_loop *loop)
172 {
173 if (loop) {
174 assert(nir_builder_is_inside_cf(build, &loop->cf_node));
175 } else {
176 nir_block *block = nir_cursor_current_block(build->cursor);
177 loop = nir_cf_node_as_loop(block->cf_node.parent);
178 }
179 build->cursor = nir_after_cf_node(&loop->cf_node);
180 }
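/*
 * Loops in NIR iterate until a break, so a builder-constructed loop normally
 * contains a conditional nir_jump_break.  A rough sketch (illustrative;
 * "exit_condition" stands for a boolean SSA value built inside the body, and
 * nir_jump() is defined further down in this header):
 *
 *    nir_loop *loop = nir_push_loop(b);
 *    {
 *       ...emit the loop body...
 *       nir_push_if(b, exit_condition);
 *       nir_jump(b, nir_jump_break);
 *       nir_pop_if(b, NULL);
 *    }
 *    nir_pop_loop(b, loop);
 */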
181
182 static inline nir_ssa_def *
183 nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
184 {
185 nir_ssa_undef_instr *undef =
186 nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
187 if (!undef)
188 return NULL;
189
190 nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);
191
192 return &undef->def;
193 }
194
195 static inline nir_ssa_def *
196 nir_build_imm(nir_builder *build, unsigned num_components,
197 unsigned bit_size, const nir_const_value *value)
198 {
199 nir_load_const_instr *load_const =
200 nir_load_const_instr_create(build->shader, num_components, bit_size);
201 if (!load_const)
202 return NULL;
203
204 memcpy(load_const->value, value, sizeof(nir_const_value) * num_components);
205
206 nir_builder_instr_insert(build, &load_const->instr);
207
208 return &load_const->def;
209 }
210
211 static inline nir_ssa_def *
212 nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
213 {
214 nir_load_const_instr *load_const =
215 nir_load_const_instr_create(build->shader, num_components, bit_size);
216
217 /* nir_load_const_instr_create uses rzalloc so it's already zero */
218
219 nir_builder_instr_insert(build, &load_const->instr);
220
221 return &load_const->def;
222 }
223
224 static inline nir_ssa_def *
225 nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
226 {
227 nir_const_value v = nir_const_value_for_bool(x, bit_size);
228 return nir_build_imm(build, 1, bit_size, &v);
229 }
230
231 static inline nir_ssa_def *
232 nir_imm_bool(nir_builder *build, bool x)
233 {
234 return nir_imm_boolN_t(build, x, 1);
235 }
236
237 static inline nir_ssa_def *
238 nir_imm_true(nir_builder *build)
239 {
240 return nir_imm_bool(build, true);
241 }
242
243 static inline nir_ssa_def *
244 nir_imm_false(nir_builder *build)
245 {
246 return nir_imm_bool(build, false);
247 }
248
249 static inline nir_ssa_def *
250 nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
251 {
252 nir_const_value v = nir_const_value_for_float(x, bit_size);
253 return nir_build_imm(build, 1, bit_size, &v);
254 }
255
256 static inline nir_ssa_def *
257 nir_imm_float16(nir_builder *build, float x)
258 {
259 return nir_imm_floatN_t(build, x, 16);
260 }
261
262 static inline nir_ssa_def *
263 nir_imm_float(nir_builder *build, float x)
264 {
265 return nir_imm_floatN_t(build, x, 32);
266 }
267
268 static inline nir_ssa_def *
269 nir_imm_double(nir_builder *build, double x)
270 {
271 return nir_imm_floatN_t(build, x, 64);
272 }
273
274 static inline nir_ssa_def *
275 nir_imm_vec2(nir_builder *build, float x, float y)
276 {
277 nir_const_value v[2] = {
278 nir_const_value_for_float(x, 32),
279 nir_const_value_for_float(y, 32),
280 };
281 return nir_build_imm(build, 2, 32, v);
282 }
283
284 static inline nir_ssa_def *
285 nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
286 {
287 nir_const_value v[4] = {
288 nir_const_value_for_float(x, 32),
289 nir_const_value_for_float(y, 32),
290 nir_const_value_for_float(z, 32),
291 nir_const_value_for_float(w, 32),
292 };
293
294 return nir_build_imm(build, 4, 32, v);
295 }
296
297 static inline nir_ssa_def *
298 nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
299 {
300 nir_const_value v[4] = {
301 nir_const_value_for_float(x, 16),
302 nir_const_value_for_float(y, 16),
303 nir_const_value_for_float(z, 16),
304 nir_const_value_for_float(w, 16),
305 };
306
307 return nir_build_imm(build, 4, 16, v);
308 }
309
310 static inline nir_ssa_def *
311 nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
312 {
313 nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
314 return nir_build_imm(build, 1, bit_size, &v);
315 }
316
317 static inline nir_ssa_def *
318 nir_imm_int(nir_builder *build, int x)
319 {
320 return nir_imm_intN_t(build, x, 32);
321 }
322
323 static inline nir_ssa_def *
324 nir_imm_int64(nir_builder *build, int64_t x)
325 {
326 return nir_imm_intN_t(build, x, 64);
327 }
328
329 static inline nir_ssa_def *
330 nir_imm_ivec2(nir_builder *build, int x, int y)
331 {
332 nir_const_value v[2] = {
333 nir_const_value_for_int(x, 32),
334 nir_const_value_for_int(y, 32),
335 };
336
337 return nir_build_imm(build, 2, 32, v);
338 }
339
340 static inline nir_ssa_def *
341 nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
342 {
343 nir_const_value v[4] = {
344 nir_const_value_for_int(x, 32),
345 nir_const_value_for_int(y, 32),
346 nir_const_value_for_int(z, 32),
347 nir_const_value_for_int(w, 32),
348 };
349
350 return nir_build_imm(build, 4, 32, v);
351 }
352
353 static inline nir_ssa_def *
354 nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
355 {
356 const nir_op_info *op_info = &nir_op_infos[instr->op];
357
358 instr->exact = build->exact;
359
360 /* Guess the number of components the destination temporary should have
361 * based on our input sizes, if it's not fixed for the op.
362 */
363 unsigned num_components = op_info->output_size;
364 if (num_components == 0) {
365 for (unsigned i = 0; i < op_info->num_inputs; i++) {
366 if (op_info->input_sizes[i] == 0)
367 num_components = MAX2(num_components,
368 instr->src[i].src.ssa->num_components);
369 }
370 }
371 assert(num_components != 0);
372
373 /* Figure out the bitwidth based on the source bitwidth if the instruction
374 * is variable-width.
375 */
376 unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
377 if (bit_size == 0) {
378 for (unsigned i = 0; i < op_info->num_inputs; i++) {
379 unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
380 if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
381 if (bit_size)
382 assert(src_bit_size == bit_size);
383 else
384 bit_size = src_bit_size;
385 } else {
386 assert(src_bit_size ==
387 nir_alu_type_get_type_size(op_info->input_types[i]));
388 }
389 }
390 }
391
392 /* When in doubt, assume 32. */
393 if (bit_size == 0)
394 bit_size = 32;
395
396 /* Make sure we don't swizzle from outside of our source vector (like if a
397 * scalar value was passed into a multiply with a vector).
398 */
399 for (unsigned i = 0; i < op_info->num_inputs; i++) {
400 for (unsigned j = instr->src[i].src.ssa->num_components;
401 j < NIR_MAX_VEC_COMPONENTS; j++) {
402 instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
403 }
404 }
405
406 nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
407 bit_size, NULL);
408 instr->dest.write_mask = (1 << num_components) - 1;
409
410 nir_builder_instr_insert(build, &instr->instr);
411
412 return &instr->dest.dest.ssa;
413 }
414
415 static inline nir_ssa_def *
416 nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
417 nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
418 {
419 nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
420 if (!instr)
421 return NULL;
422
423 instr->src[0].src = nir_src_for_ssa(src0);
424 if (src1)
425 instr->src[1].src = nir_src_for_ssa(src1);
426 if (src2)
427 instr->src[2].src = nir_src_for_ssa(src2);
428 if (src3)
429 instr->src[3].src = nir_src_for_ssa(src3);
430
431 return nir_builder_alu_instr_finish_and_insert(build, instr);
432 }
433
434 /* For the couple of special cases with more than 4 src args: */
435 static inline nir_ssa_def *
436 nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
437 {
438 const nir_op_info *op_info = &nir_op_infos[op];
439 nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
440 if (!instr)
441 return NULL;
442
443 for (unsigned i = 0; i < op_info->num_inputs; i++)
444 instr->src[i].src = nir_src_for_ssa(srcs[i]);
445
446 return nir_builder_alu_instr_finish_and_insert(build, instr);
447 }
448
449 #include "nir_builder_opcodes.h"
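/*
 * The generated nir_builder_opcodes.h provides one helper per ALU opcode
 * (nir_fadd(), nir_imul(), nir_bcsel(), ...), each of which ends up calling
 * nir_build_alu() above.  For example (illustrative):
 *
 *    nir_ssa_def *r = nir_fadd(b, nir_fmul(b, a, x), y);   // r = a * x + y
 *
 * The destination's component count and bit size are inferred from the
 * sources by nir_builder_alu_instr_finish_and_insert().
 */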
450
451 static inline nir_ssa_def *
452 nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
453 {
454 return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
455 }
456
457 static inline nir_ssa_def *
458 nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
459 {
460 assert(!src.abs && !src.negate);
461 if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
462 bool any_swizzles = false;
463 for (unsigned i = 0; i < num_components; i++) {
464 if (src.swizzle[i] != i)
465 any_swizzles = true;
466 }
467 if (!any_swizzles)
468 return src.src.ssa;
469 }
470
471 nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
472 nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
473 nir_src_bit_size(src.src), NULL);
474 mov->exact = build->exact;
475 mov->dest.write_mask = (1 << num_components) - 1;
476 mov->src[0] = src;
477 nir_builder_instr_insert(build, &mov->instr);
478
479 return &mov->dest.dest.ssa;
480 }
481
482 /**
483  * Construct a mov that reswizzles the source's components.
484 */
485 static inline nir_ssa_def *
486 nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
487 unsigned num_components)
488 {
489 assert(num_components <= NIR_MAX_VEC_COMPONENTS);
490 nir_alu_src alu_src = { NIR_SRC_INIT };
491 alu_src.src = nir_src_for_ssa(src);
492
493 bool is_identity_swizzle = true;
494 for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++) {
495 if (swiz[i] != i)
496 is_identity_swizzle = false;
497 alu_src.swizzle[i] = swiz[i];
498 }
499
500 if (num_components == src->num_components && is_identity_swizzle)
501 return src;
502
503 return nir_mov_alu(build, alu_src, num_components);
504 }
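/*
 * Example (illustrative): extracting .zyx from a vec4 "v":
 *
 *    static const unsigned zyx[] = { 2, 1, 0 };
 *    nir_ssa_def *swizzled = nir_swizzle(b, v, zyx, 3);
 *
 * When the requested swizzle is the identity and covers every component of
 * the source, the source itself is returned and no mov is emitted.
 */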
505
506 /* Selects the right fdot given the number of components in each source. */
507 static inline nir_ssa_def *
508 nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
509 {
510 assert(src0->num_components == src1->num_components);
511 switch (src0->num_components) {
512 case 1: return nir_fmul(build, src0, src1);
513 case 2: return nir_fdot2(build, src0, src1);
514 case 3: return nir_fdot3(build, src0, src1);
515 case 4: return nir_fdot4(build, src0, src1);
516 default:
517 unreachable("bad component size");
518 }
519
520 return NULL;
521 }
522
523 static inline nir_ssa_def *
524 nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
525 {
526 switch (src0->num_components) {
527 case 1: return nir_ieq(b, src0, src1);
528 case 2: return nir_ball_iequal2(b, src0, src1);
529 case 3: return nir_ball_iequal3(b, src0, src1);
530 case 4: return nir_ball_iequal4(b, src0, src1);
531 default:
532 unreachable("bad component size");
533 }
534 }
535
536 static inline nir_ssa_def *
537 nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
538 {
539 switch (src0->num_components) {
540 case 1: return nir_ine(b, src0, src1);
541 case 2: return nir_bany_inequal2(b, src0, src1);
542 case 3: return nir_bany_inequal3(b, src0, src1);
543 case 4: return nir_bany_inequal4(b, src0, src1);
544 default:
545 unreachable("bad component size");
546 }
547 }
548
549 static inline nir_ssa_def *
550 nir_bany(nir_builder *b, nir_ssa_def *src)
551 {
552 return nir_bany_inequal(b, src, nir_imm_false(b));
553 }
554
555 static inline nir_ssa_def *
556 nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
557 {
558 return nir_swizzle(b, def, &c, 1);
559 }
560
561 static inline nir_ssa_def *
562 nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
563 {
564 unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
565
566 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
567 if ((mask & (1 << i)) == 0)
568 continue;
569 swizzle[num_channels++] = i;
570 }
571
572 return nir_swizzle(b, def, swizzle, num_channels);
573 }
574
575 static inline nir_ssa_def *
576 _nir_vector_extract_helper(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c,
577 unsigned start, unsigned end)
578 {
579 if (start == end - 1) {
580 return nir_channel(b, vec, start);
581 } else {
582 unsigned mid = start + (end - start) / 2;
583 return nir_bcsel(b, nir_ilt(b, c, nir_imm_int(b, mid)),
584 _nir_vector_extract_helper(b, vec, c, start, mid),
585 _nir_vector_extract_helper(b, vec, c, mid, end));
586 }
587 }
588
589 static inline nir_ssa_def *
590 nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
591 {
592 nir_src c_src = nir_src_for_ssa(c);
593 if (nir_src_is_const(c_src)) {
594 unsigned c_const = nir_src_as_uint(c_src);
595 if (c_const < vec->num_components)
596 return nir_channel(b, vec, c_const);
597 else
598 return nir_ssa_undef(b, 1, vec->bit_size);
599 } else {
600 return _nir_vector_extract_helper(b, vec, c, 0, vec->num_components);
601 }
602 }
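/*
 * Example (illustrative): indexing a vec4 "v" with a dynamically computed
 * index "idx":
 *
 *    nir_ssa_def *elem = nir_vector_extract(b, v, idx);
 *
 * With a constant in-range index this folds to a plain nir_channel(); a
 * constant out-of-range index yields an undef; otherwise it expands to a
 * binary tree of nir_bcsel() selects via the helper above.
 */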
603
604 static inline nir_ssa_def *
605 nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
606 {
607 if (x->bit_size == dest_bit_size)
608 return x;
609
610 switch (dest_bit_size) {
611 case 64: return nir_i2i64(build, x);
612 case 32: return nir_i2i32(build, x);
613 case 16: return nir_i2i16(build, x);
614 case 8: return nir_i2i8(build, x);
615 default: unreachable("Invalid bit size");
616 }
617 }
618
619 static inline nir_ssa_def *
620 nir_u2u(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
621 {
622 if (x->bit_size == dest_bit_size)
623 return x;
624
625 switch (dest_bit_size) {
626 case 64: return nir_u2u64(build, x);
627 case 32: return nir_u2u32(build, x);
628 case 16: return nir_u2u16(build, x);
629 case 8: return nir_u2u8(build, x);
630 default: unreachable("Invalid bit size");
631 }
632 }
633
634 static inline nir_ssa_def *
635 nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
636 {
637 assert(x->bit_size <= 64);
638 if (x->bit_size < 64)
639 y &= (1ull << x->bit_size) - 1;
640
641 if (y == 0) {
642 return x;
643 } else {
644 return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
645 }
646 }
647
648 static inline nir_ssa_def *
649 _nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
650 {
651 assert(x->bit_size <= 64);
652 if (x->bit_size < 64)
653 y &= (1ull << x->bit_size) - 1;
654
655 if (y == 0) {
656 return nir_imm_intN_t(build, 0, x->bit_size);
657 } else if (y == 1) {
658 return x;
659 } else if (util_is_power_of_two_or_zero64(y)) {
660 return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
661 } else if (amul) {
662 return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
663 } else {
664 return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
665 }
666 }
667
668 static inline nir_ssa_def *
669 nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
670 {
671 return _nir_mul_imm(build, x, y, false);
672 }
673
674 static inline nir_ssa_def *
675 nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
676 {
677 return _nir_mul_imm(build, x, y, true);
678 }
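/*
 * The multiply-by-immediate helpers strength-reduce where the code above can
 * prove it is safe: nir_imul_imm(b, x, 8) becomes an ishl by 3,
 * nir_imul_imm(b, x, 1) returns x unchanged, and nir_imul_imm(b, x, 0) folds
 * to an immediate zero.
 */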
679
680 static inline nir_ssa_def *
681 nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
682 {
683 return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
684 }
685
686 static inline nir_ssa_def *
687 nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
688 {
689 return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
690 }
691
692 static inline nir_ssa_def *
693 nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
694 {
695 assert(src->num_components * src->bit_size == dest_bit_size);
696
697 switch (dest_bit_size) {
698 case 64:
699 switch (src->bit_size) {
700 case 32: return nir_pack_64_2x32(b, src);
701 case 16: return nir_pack_64_4x16(b, src);
702 default: break;
703 }
704 break;
705
706 case 32:
707 if (src->bit_size == 16)
708 return nir_pack_32_2x16(b, src);
709 break;
710
711 default:
712 break;
713 }
714
715 /* If we got here, we have no dedicated pack opcode. */
716 nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
717 for (unsigned i = 0; i < src->num_components; i++) {
718 nir_ssa_def *val = nir_u2u(b, nir_channel(b, src, i), dest_bit_size);
719 val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
720 dest = nir_ior(b, dest, val);
721 }
722 return dest;
723 }
724
725 static inline nir_ssa_def *
726 nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
727 {
728 assert(src->num_components == 1);
729 assert(src->bit_size > dest_bit_size);
730 const unsigned dest_num_components = src->bit_size / dest_bit_size;
731 assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);
732
733 switch (src->bit_size) {
734 case 64:
735 switch (dest_bit_size) {
736 case 32: return nir_unpack_64_2x32(b, src);
737 case 16: return nir_unpack_64_4x16(b, src);
738 default: break;
739 }
740 break;
741
742 case 32:
743 if (dest_bit_size == 16)
744 return nir_unpack_32_2x16(b, src);
745 break;
746
747 default:
748 break;
749 }
750
751 /* If we got here, we have no dedicated unpack opcode. */
752 nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
753 for (unsigned i = 0; i < dest_num_components; i++) {
754 nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
755 dest_comps[i] = nir_u2u(b, val, dest_bit_size);
756 }
757 return nir_vec(b, dest_comps, dest_num_components);
758 }
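/*
 * Example (illustrative): splitting a 64-bit scalar into its 32-bit halves
 * and reassembling it:
 *
 *    nir_ssa_def *halves = nir_unpack_bits(b, val64, 32);   // vec2 of 32-bit
 *    nir_ssa_def *lo = nir_channel(b, halves, 0);
 *    nir_ssa_def *hi = nir_channel(b, halves, 1);
 *    nir_ssa_def *again = nir_pack_bits(b, halves, 64);
 *
 * Where a dedicated opcode exists (nir_op_unpack_64_2x32 here) it is used;
 * otherwise the helpers fall back to shift/convert/or sequences.
 */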
759
760 /**
761 * Treats the array of sources as one big blob of bits and extracts the range
762 * of bits given by
763 *
764 * [first_bit, first_bit + dest_num_components * dest_bit_size)
765 *
766 * The range can have any alignment or size as long as it's an integer number
767 * of destination components and fits inside the concatenated sources.
768 *
769 * TODO: The one caveat here is that we can't handle byte alignment if 64-bit
770 * values are involved because that would require pack/unpack to/from a vec8
771 * which NIR currently does not support.
772 */
773 static inline nir_ssa_def *
774 nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
775 unsigned first_bit,
776 unsigned dest_num_components, unsigned dest_bit_size)
777 {
778 const unsigned num_bits = dest_num_components * dest_bit_size;
779
780 /* Figure out the common bit size */
781 unsigned common_bit_size = dest_bit_size;
782 for (unsigned i = 0; i < num_srcs; i++)
783 common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
784 if (first_bit > 0)
785 common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));
786
787 /* We don't want to have to deal with 1-bit values */
788 assert(common_bit_size >= 8);
789
790 nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
791 assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));
792
793 /* First, unpack to the common bit size and select the components from the
794 * source.
795 */
796 int src_idx = -1;
797 unsigned src_start_bit = 0;
798 unsigned src_end_bit = 0;
799 for (unsigned i = 0; i < num_bits / common_bit_size; i++) {
800 const unsigned bit = first_bit + (i * common_bit_size);
801 while (bit >= src_end_bit) {
802 src_idx++;
803 assert(src_idx < (int) num_srcs);
804 src_start_bit = src_end_bit;
805 src_end_bit += srcs[src_idx]->bit_size *
806 srcs[src_idx]->num_components;
807 }
808 assert(bit >= src_start_bit);
809 assert(bit + common_bit_size <= src_end_bit);
810 const unsigned rel_bit = bit - src_start_bit;
811 const unsigned src_bit_size = srcs[src_idx]->bit_size;
812
813 nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
814 rel_bit / src_bit_size);
815 if (srcs[src_idx]->bit_size > common_bit_size) {
816 nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
817 comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) /
818 common_bit_size);
819 }
820 common_comps[i] = comp;
821 }
822
823 /* Now, re-pack the destination if we have to */
824 if (dest_bit_size > common_bit_size) {
825 unsigned common_per_dest = dest_bit_size / common_bit_size;
826 nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
827 for (unsigned i = 0; i < dest_num_components; i++) {
828 nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
829 common_per_dest);
830 dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
831 }
832 return nir_vec(b, dest_comps, dest_num_components);
833 } else {
834 assert(dest_bit_size == common_bit_size);
835 return nir_vec(b, common_comps, dest_num_components);
836 }
837 }
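/*
 * Example (illustrative): reading the third and fourth 32-bit dwords out of
 * two sources that together hold 256 bits ("vec4_32bit_a" and "vec4_32bit_b"
 * are assumed 4-component 32-bit values):
 *
 *    nir_ssa_def *srcs[] = { vec4_32bit_a, vec4_32bit_b };
 *    nir_ssa_def *dw23 = nir_extract_bits(b, srcs, 2, 2 * 32, 2, 32);
 *
 * first_bit = 64 is the bit offset into the concatenation of the sources, and
 * the result is a 2-component 32-bit vector.
 */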
838
839 static inline nir_ssa_def *
840 nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
841 {
842 assert((src->bit_size * src->num_components) % dest_bit_size == 0);
843 const unsigned dest_num_components =
844 (src->bit_size * src->num_components) / dest_bit_size;
845 assert(dest_num_components <= NIR_MAX_VEC_COMPONENTS);
846
847 return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
848 }
849
850 /**
851 * Turns a nir_src into a nir_ssa_def * so it can be passed to
852 * nir_build_alu()-based builder calls.
853 *
854 * See nir_ssa_for_alu_src() for alu instructions.
855 */
856 static inline nir_ssa_def *
857 nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
858 {
859 if (src.is_ssa && src.ssa->num_components == num_components)
860 return src.ssa;
861
862 nir_alu_src alu = { NIR_SRC_INIT };
863 alu.src = src;
864 for (int j = 0; j < 4; j++)
865 alu.swizzle[j] = j;
866
867 return nir_mov_alu(build, alu, num_components);
868 }
869
870 /**
871 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
872 * nir_alu_src's swizzle.
873 */
874 static inline nir_ssa_def *
875 nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
876 {
877 static uint8_t trivial_swizzle[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
878 STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);
879
880 nir_alu_src *src = &instr->src[srcn];
881 unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
882
883 if (src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
884 !src->abs && !src->negate &&
885 (memcmp(src->swizzle, trivial_swizzle, num_components) == 0))
886 return src->src.ssa;
887
888 return nir_mov_alu(build, *src, num_components);
889 }
890
891 static inline unsigned
892 nir_get_ptr_bitsize(nir_builder *build)
893 {
894 if (build->shader->info.stage == MESA_SHADER_KERNEL)
895 return build->shader->info.cs.ptr_size;
896 return 32;
897 }
898
899 static inline nir_deref_instr *
900 nir_build_deref_var(nir_builder *build, nir_variable *var)
901 {
902 nir_deref_instr *deref =
903 nir_deref_instr_create(build->shader, nir_deref_type_var);
904
905 deref->mode = var->data.mode;
906 deref->type = var->type;
907 deref->var = var;
908
909 nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
910 nir_get_ptr_bitsize(build), NULL);
911
912 nir_builder_instr_insert(build, &deref->instr);
913
914 return deref;
915 }
916
917 static inline nir_deref_instr *
918 nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
919 nir_ssa_def *index)
920 {
921 assert(glsl_type_is_array(parent->type) ||
922 glsl_type_is_matrix(parent->type) ||
923 glsl_type_is_vector(parent->type));
924
925 assert(index->bit_size == parent->dest.ssa.bit_size);
926
927 nir_deref_instr *deref =
928 nir_deref_instr_create(build->shader, nir_deref_type_array);
929
930 deref->mode = parent->mode;
931 deref->type = glsl_get_array_element(parent->type);
932 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
933 deref->arr.index = nir_src_for_ssa(index);
934
935 nir_ssa_dest_init(&deref->instr, &deref->dest,
936 parent->dest.ssa.num_components,
937 parent->dest.ssa.bit_size, NULL);
938
939 nir_builder_instr_insert(build, &deref->instr);
940
941 return deref;
942 }
943
944 static inline nir_deref_instr *
945 nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
946 int64_t index)
947 {
948 assert(parent->dest.is_ssa);
949 nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
950 parent->dest.ssa.bit_size);
951
952 return nir_build_deref_array(build, parent, idx_ssa);
953 }
954
955 static inline nir_deref_instr *
956 nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
957 nir_ssa_def *index)
958 {
959 assert(parent->deref_type == nir_deref_type_array ||
960 parent->deref_type == nir_deref_type_ptr_as_array ||
961 parent->deref_type == nir_deref_type_cast);
962
963 assert(index->bit_size == parent->dest.ssa.bit_size);
964
965 nir_deref_instr *deref =
966 nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);
967
968 deref->mode = parent->mode;
969 deref->type = parent->type;
970 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
971 deref->arr.index = nir_src_for_ssa(index);
972
973 nir_ssa_dest_init(&deref->instr, &deref->dest,
974 parent->dest.ssa.num_components,
975 parent->dest.ssa.bit_size, NULL);
976
977 nir_builder_instr_insert(build, &deref->instr);
978
979 return deref;
980 }
981
982 static inline nir_deref_instr *
983 nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
984 {
985 assert(glsl_type_is_array(parent->type) ||
986 glsl_type_is_matrix(parent->type));
987
988 nir_deref_instr *deref =
989 nir_deref_instr_create(build->shader, nir_deref_type_array_wildcard);
990
991 deref->mode = parent->mode;
992 deref->type = glsl_get_array_element(parent->type);
993 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
994
995 nir_ssa_dest_init(&deref->instr, &deref->dest,
996 parent->dest.ssa.num_components,
997 parent->dest.ssa.bit_size, NULL);
998
999 nir_builder_instr_insert(build, &deref->instr);
1000
1001 return deref;
1002 }
1003
1004 static inline nir_deref_instr *
1005 nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
1006 unsigned index)
1007 {
1008 assert(glsl_type_is_struct_or_ifc(parent->type));
1009
1010 nir_deref_instr *deref =
1011 nir_deref_instr_create(build->shader, nir_deref_type_struct);
1012
1013 deref->mode = parent->mode;
1014 deref->type = glsl_get_struct_field(parent->type, index);
1015 deref->parent = nir_src_for_ssa(&parent->dest.ssa);
1016 deref->strct.index = index;
1017
1018 nir_ssa_dest_init(&deref->instr, &deref->dest,
1019 parent->dest.ssa.num_components,
1020 parent->dest.ssa.bit_size, NULL);
1021
1022 nir_builder_instr_insert(build, &deref->instr);
1023
1024 return deref;
1025 }
1026
1027 static inline nir_deref_instr *
1028 nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
1029 nir_variable_mode mode, const struct glsl_type *type,
1030 unsigned ptr_stride)
1031 {
1032 nir_deref_instr *deref =
1033 nir_deref_instr_create(build->shader, nir_deref_type_cast);
1034
1035 deref->mode = mode;
1036 deref->type = type;
1037 deref->parent = nir_src_for_ssa(parent);
1038 deref->cast.ptr_stride = ptr_stride;
1039
1040 nir_ssa_dest_init(&deref->instr, &deref->dest,
1041 parent->num_components, parent->bit_size, NULL);
1042
1043 nir_builder_instr_insert(build, &deref->instr);
1044
1045 return deref;
1046 }
1047
1048 /** Returns a deref that follows another but starts from the given parent
1049 *
1050 * The new deref will be the same type and take the same array or struct index
1051 * as the leader deref but it may have a different parent. This is very
1052 * useful for walking deref paths.
1053 */
1054 static inline nir_deref_instr *
1055 nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
1056 nir_deref_instr *leader)
1057 {
1058 /* If the derefs would have the same parent, don't make a new one */
1059 assert(leader->parent.is_ssa);
1060 if (leader->parent.ssa == &parent->dest.ssa)
1061 return leader;
1062
1063 UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);
1064
1065 switch (leader->deref_type) {
1066 case nir_deref_type_var:
1067 unreachable("A var dereference cannot have a parent");
1068 break;
1069
1070 case nir_deref_type_array:
1071 case nir_deref_type_array_wildcard:
1072 assert(glsl_type_is_matrix(parent->type) ||
1073 glsl_type_is_array(parent->type) ||
1074 (leader->deref_type == nir_deref_type_array &&
1075 glsl_type_is_vector(parent->type)));
1076 assert(glsl_get_length(parent->type) ==
1077 glsl_get_length(leader_parent->type));
1078
1079 if (leader->deref_type == nir_deref_type_array) {
1080 assert(leader->arr.index.is_ssa);
1081 nir_ssa_def *index = nir_i2i(b, leader->arr.index.ssa,
1082 parent->dest.ssa.bit_size);
1083 return nir_build_deref_array(b, parent, index);
1084 } else {
1085 return nir_build_deref_array_wildcard(b, parent);
1086 }
1087
1088 case nir_deref_type_struct:
1089 assert(glsl_type_is_struct_or_ifc(parent->type));
1090 assert(glsl_get_length(parent->type) ==
1091 glsl_get_length(leader_parent->type));
1092
1093 return nir_build_deref_struct(b, parent, leader->strct.index);
1094
1095 default:
1096 unreachable("Invalid deref instruction type");
1097 }
1098 }
1099
1100 static inline nir_ssa_def *
1101 nir_load_reg(nir_builder *build, nir_register *reg)
1102 {
1103 return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
1104 }
1105
1106 static inline nir_ssa_def *
1107 nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
1108 enum gl_access_qualifier access)
1109 {
1110 nir_intrinsic_instr *load =
1111 nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
1112 load->num_components = glsl_get_vector_elements(deref->type);
1113 load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
1114 nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
1115 glsl_get_bit_size(deref->type), NULL);
1116 nir_intrinsic_set_access(load, access);
1117 nir_builder_instr_insert(build, &load->instr);
1118 return &load->dest.ssa;
1119 }
1120
1121 static inline nir_ssa_def *
1122 nir_load_deref(nir_builder *build, nir_deref_instr *deref)
1123 {
1124 return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
1125 }
1126
1127 static inline void
1128 nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
1129 nir_ssa_def *value, unsigned writemask,
1130 enum gl_access_qualifier access)
1131 {
1132 nir_intrinsic_instr *store =
1133 nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
1134 store->num_components = glsl_get_vector_elements(deref->type);
1135 store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
1136 store->src[1] = nir_src_for_ssa(value);
1137 nir_intrinsic_set_write_mask(store,
1138 writemask & ((1 << store->num_components) - 1));
1139 nir_intrinsic_set_access(store, access);
1140 nir_builder_instr_insert(build, &store->instr);
1141 }
1142
1143 static inline void
1144 nir_store_deref(nir_builder *build, nir_deref_instr *deref,
1145 nir_ssa_def *value, unsigned writemask)
1146 {
1147 nir_store_deref_with_access(build, deref, value, writemask,
1148 (enum gl_access_qualifier)0);
1149 }
1150
1151 static inline void
1152 nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
1153 nir_deref_instr *src,
1154 enum gl_access_qualifier dest_access,
1155 enum gl_access_qualifier src_access)
1156 {
1157 nir_intrinsic_instr *copy =
1158 nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_deref);
1159 copy->src[0] = nir_src_for_ssa(&dest->dest.ssa);
1160 copy->src[1] = nir_src_for_ssa(&src->dest.ssa);
1161 nir_intrinsic_set_dst_access(copy, dest_access);
1162 nir_intrinsic_set_src_access(copy, src_access);
1163 nir_builder_instr_insert(build, &copy->instr);
1164 }
1165
1166 static inline void
1167 nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
1168 {
1169 nir_copy_deref_with_access(build, dest, src,
1170 (enum gl_access_qualifier) 0,
1171 (enum gl_access_qualifier) 0);
1172 }
1173
1174 static inline nir_ssa_def *
1175 nir_load_var(nir_builder *build, nir_variable *var)
1176 {
1177 return nir_load_deref(build, nir_build_deref_var(build, var));
1178 }
1179
1180 static inline void
1181 nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
1182 unsigned writemask)
1183 {
1184 nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
1185 }
1186
1187 static inline void
1188 nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
1189 {
1190 nir_copy_deref(build, nir_build_deref_var(build, dest),
1191 nir_build_deref_var(build, src));
1192 }
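/*
 * Usage sketch for the deref-based variable helpers (illustrative; "var" is
 * assumed to be a vec4 variable and "arr_var" an array variable):
 *
 *    nir_ssa_def *v = nir_load_var(b, var);
 *    nir_store_var(b, var, nir_fneg(b, v), 0xf);
 *
 *    nir_deref_instr *elem =
 *       nir_build_deref_array_imm(b, nir_build_deref_var(b, arr_var), 3);
 *    nir_ssa_def *e = nir_load_deref(b, elem);
 *
 * The writemask passed to nir_store_var()/nir_store_deref() is in terms of
 * the variable's components, so 0xf writes all four components of a vec4.
 */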
1193
1194 static inline nir_ssa_def *
1195 nir_load_param(nir_builder *build, uint32_t param_idx)
1196 {
1197 assert(param_idx < build->impl->function->num_params);
1198 nir_parameter *param = &build->impl->function->params[param_idx];
1199
1200 nir_intrinsic_instr *load =
1201 nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_param);
1202 nir_intrinsic_set_param_idx(load, param_idx);
1203 load->num_components = param->num_components;
1204 nir_ssa_dest_init(&load->instr, &load->dest,
1205 param->num_components, param->bit_size, NULL);
1206 nir_builder_instr_insert(build, &load->instr);
1207 return &load->dest.ssa;
1208 }
1209
1210 #include "nir_builder_opcodes.h"
1211
1212 static inline nir_ssa_def *
1213 nir_f2b(nir_builder *build, nir_ssa_def *f)
1214 {
1215 return nir_f2b1(build, f);
1216 }
1217
1218 static inline nir_ssa_def *
1219 nir_i2b(nir_builder *build, nir_ssa_def *i)
1220 {
1221 return nir_i2b1(build, i);
1222 }
1223
1224 static inline nir_ssa_def *
1225 nir_b2f(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
1226 {
1227 switch (bit_size) {
1228 case 64: return nir_b2f64(build, b);
1229 case 32: return nir_b2f32(build, b);
1230 case 16: return nir_b2f16(build, b);
1231 default:
1232 unreachable("Invalid bit-size");
1233 };
1234 }
1235
1236 static inline nir_ssa_def *
1237 nir_b2i(nir_builder *build, nir_ssa_def *b, uint32_t bit_size)
1238 {
1239 switch (bit_size) {
1240 case 64: return nir_b2i64(build, b);
1241 case 32: return nir_b2i32(build, b);
1242 case 16: return nir_b2i16(build, b);
1243 case 8: return nir_b2i8(build, b);
1244 default:
1245 unreachable("Invalid bit-size");
1246 };
1247 }

1248 static inline nir_ssa_def *
1249 nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
1250 unsigned interp_mode)
1251 {
1252 nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
1253 nir_ssa_dest_init(&bary->instr, &bary->dest, 2, 32, NULL);
1254 nir_intrinsic_set_interp_mode(bary, interp_mode);
1255 nir_builder_instr_insert(build, &bary->instr);
1256 return &bary->dest.ssa;
1257 }
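/*
 * Example (illustrative): requesting pixel-center barycentrics for smooth
 * interpolation:
 *
 *    nir_ssa_def *bary =
 *       nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
 *                            INTERP_MODE_SMOOTH);
 *
 * The result is the 2-component barycentric weight vector that
 * load_interpolated_input consumes.
 */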
1258
1259 static inline void
1260 nir_jump(nir_builder *build, nir_jump_type jump_type)
1261 {
1262 nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
1263 nir_builder_instr_insert(build, &jump->instr);
1264 }
1265
1266 static inline nir_ssa_def *
1267 nir_compare_func(nir_builder *b, enum compare_func func,
1268 nir_ssa_def *src0, nir_ssa_def *src1)
1269 {
1270 switch (func) {
1271 case COMPARE_FUNC_NEVER:
1272 return nir_imm_int(b, 0);
1273 case COMPARE_FUNC_ALWAYS:
1274 return nir_imm_int(b, ~0);
1275 case COMPARE_FUNC_EQUAL:
1276 return nir_feq(b, src0, src1);
1277 case COMPARE_FUNC_NOTEQUAL:
1278 return nir_fne(b, src0, src1);
1279 case COMPARE_FUNC_GREATER:
1280 return nir_flt(b, src1, src0);
1281 case COMPARE_FUNC_GEQUAL:
1282 return nir_fge(b, src0, src1);
1283 case COMPARE_FUNC_LESS:
1284 return nir_flt(b, src0, src1);
1285 case COMPARE_FUNC_LEQUAL:
1286 return nir_fge(b, src1, src0);
1287 }
1288 unreachable("bad compare func");
1289 }
1290
1291 #endif /* NIR_BUILDER_H */