/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}
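
/* A minimal usage sketch (illustrative, not part of the original file):
 *
 *    nir_shader *s = nir_shader_create(NULL, MESA_SHADER_VERTEX, opts, NULL);
 *    nir_function *fn = nir_function_create(s, "main");
 *    fn->is_entrypoint = true;
 *    nir_function_impl *impl = nir_function_impl_create(fn);
 *
 * Tagging is_entrypoint is what lets nir_shader_get_entrypoint() find the
 * function later; the exact setup varies by driver.
 */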

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_function:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_private:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_ubo:
   case nir_var_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_function;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;
   func->is_entrypoint = false;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
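
/* A hedged sketch of the distinction in the NOTE above (illustrative names,
 * not from the original file): nir_src_copy() only duplicates the source, so
 * it is for instructions still detached from the IR.  Once an instruction is
 * inserted, its use lists must be maintained, so a pass would instead write
 *
 *    nir_instr_rewrite_src(&alu->instr, &alu->src[0].src,
 *                          nir_src_for_ssa(new_def));
 *
 * which removes the old use-list entry and registers the new one.
 */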

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state-specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 16: v.u16[0] = _mesa_float_to_half(d); break;
   case 32: v.f32[0] = d; break;
   case 64: v.f64[0] = d; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 1: v.b[0] = i & 1; break;
   case 8: v.i8[0] = i; break;
   case 16: v.i16[0] = i; break;
   case 32: v.i32[0] = i; break;
   case 64: v.i64[0] = i; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
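
/* An illustrative consequence of the reduction (a sketch, not from the
 * original file): syntactically different cursors that name the same
 * insertion point compare equal.  For a block B whose last instruction is I,
 *
 *    nir_cursors_equal(nir_after_instr(I), nir_after_block(B))
 *
 * is true, because "after the last instruction" reduces to "after the
 * block".
 */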

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

int64_t
nir_src_comp_as_int(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1: return -(int)load->value.b[comp];
   case 8: return load->value.i8[comp];
   case 16: return load->value.i16[comp];
   case 32: return load->value.i32[comp];
   case 64: return load->value.i64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

uint64_t
nir_src_comp_as_uint(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 1: return load->value.b[comp];
   case 8: return load->value.u8[comp];
   case 16: return load->value.u16[comp];
   case 32: return load->value.u32[comp];
   case 64: return load->value.u64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

bool
nir_src_comp_as_bool(nir_src src, unsigned comp)
{
   int64_t i = nir_src_comp_as_int(src, comp);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

double
nir_src_comp_as_float(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 16: return _mesa_half_to_float(load->value.u16[comp]);
   case 32: return load->value.f32[comp];
   case 64: return load->value.f64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

int64_t
nir_src_as_int(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_int(src, 0);
}

uint64_t
nir_src_as_uint(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_uint(src, 0);
}

bool
nir_src_as_bool(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_bool(src, 0);
}

double
nir_src_as_float(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_float(src, 0);
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
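
/* Illustrative usage of the constant helpers above (a sketch, not part of
 * the original file).  A pass that wants to fold on a known-constant scalar
 * source would typically do
 *
 *    if (nir_src_is_const(instr->src[0].src)) {
 *       uint64_t v = nir_src_as_uint(instr->src[0].src);
 *       ...fold using v...
 *    }
 *
 * nir_src_as_const_value() returns the whole nir_const_value instead, for
 * per-component access beyond these helpers.
 */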

/**
 * Returns true if the source is known to be dynamically uniform. Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
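
/* A hedged sketch of the fixup pattern described above (builder-based,
 * illustrative names, not from the original file).  Suppose a pass must
 * clamp the result of an existing ALU instruction; it can emit the clamp
 * right after the def and then redirect only the downstream uses:
 *
 *    b->cursor = nir_after_instr(&alu->instr);
 *    nir_ssa_def *clamped = nir_fsat(b, &alu->dest.dest.ssa);
 *    nir_ssa_def_rewrite_uses_after(&alu->dest.dest.ssa,
 *                                   nir_src_for_ssa(clamped),
 *                                   clamped->parent_instr);
 *
 * The fsat itself keeps reading the original def, while every later use now
 * reads the clamped value.
 */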

nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps the location attributes if they are
 * doubles.  Not needed for Vulkan due to the differences in input location
 * counts for doubles between Vulkan and OpenGL.
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering.  The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
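
/* A worked example of the remap above (illustrative assumption: a dvec3
 * counts as two vertex-input slots).  With a dvec3 at location 0 and a float
 * at location 1, the dvec3 sets bits 0-1 of dual_slot (0x3).  The float then
 * moves to
 *
 *    new_loc = 1 + util_bitcount(0x3 & BITFIELD64_MASK(1)) = 2
 *
 * i.e. it is pushed past the extra slot now occupied by the double, while
 * the dvec3 itself stays at location 0.
 */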

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}
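
/* Illustrative example (not from the original file): with dual_slot = 0x2,
 * meaning a double's second slot sits at bit 1, and attribs = 0b1101
 * (slots 0, 2, 3 set), one iteration keeps bits 0-1 and shifts the rest
 * down by one:
 *
 *    (0b1101 & 0b11) | ((0b1101 & ~0b11) >> 1) = 0b01 | 0b0110 = 0b0111
 *
 * i.e. the attribute mask re-compacted to single-slot numbering.
 */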