/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}
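
/* Usage sketch: creating a bare fragment-shader container.  `options` is
 * assumed to be supplied by the driver; passing NULL for si just seeds
 * shader->info.stage.
 *
 *    nir_shader *s =
 *       nir_shader_create(NULL, MESA_SHADER_FRAGMENT, options, NULL);
 */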

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_ubo:
   case nir_var_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}
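
/* Usage sketch: the usual way to get an empty-but-valid function body,
 * assuming a shader `s` created as above.  The impl comes back with its
 * start and end blocks already created and linked.
 *
 *    nir_function *main_fn = nir_function_create(s, "main");
 *    nir_function_impl *impl = nir_function_impl_create(main_fn);
 */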

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This way it
    * doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, the state tracker creates an
    * initial IR, clones it, runs the appropriate lowering pass, and
    * passes it to the driver, which does common lowering/opt and then
    * stores a ref that is later used for state-specific lowering and
    * further opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
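
/* Usage sketch: hand-building an fmul, assuming existing nir_ssa_def
 * pointers `a` and `b` of matching size and an insertion point `cursor`;
 * nir_src_for_ssa() is the helper from nir.h.
 *
 *    nir_alu_instr *mul = nir_alu_instr_create(s, nir_op_fmul);
 *    mul->src[0].src = nir_src_for_ssa(a);
 *    mul->src[1].src = nir_src_for_ssa(b);
 *    nir_ssa_dest_init(&mul->instr, &mul->dest.dest,
 *                      a->num_components, a->bit_size, NULL);
 *    mul->dest.write_mask = (1 << a->num_components) - 1;
 *    nir_instr_insert(cursor, &mul->instr);
 */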

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}
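
/* Usage sketch: forcing an explicit LOD of zero on an existing texture
 * instruction `tex`, assuming a previously built zero constant `zero`;
 * nir_src_for_ssa() is from nir.h.
 *
 *    nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(zero));
 *
 * The inverse, dropping source i:
 *
 *    nir_tex_instr_remove_src(tex, i);
 */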

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 16: v.u16[0] = _mesa_float_to_half(d);  break;
   case 32: v.f32[0] = d;                       break;
   case 64: v.f64[0] = d;                       break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 1:  v.b[0]   = i & 1;  break;
   case 8:  v.i8[0]  = i;      break;
   case 16: v.i16[0] = i;      break;
   case 32: v.i32[0] = i;      break;
   case 64: v.i64[0] = i;      break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}
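
/* For example, the identity for an 8-bit nir_op_imin reduction is
 * max_int = 127, since imin(127, x) == x for every int8_t x.
 */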

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to the "after" form and
 * by raising it to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
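
/* For instance, a nir_cursor_before_instr cursor on some instruction and a
 * nir_cursor_after_instr cursor on the instruction just before it describe
 * the same insertion point; reduce_cursor() maps both to the latter form,
 * so nir_cursors_equal() reports them as equal.
 */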

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
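
/* Usage sketch: the cursor constructors (nir_before_block(),
 * nir_after_instr(), ...) live in nir.h.  Inserting a previously built
 * instruction `mul` at the top of a block would look like:
 *
 *    nir_instr_insert(nir_before_block(block), &mul->instr);
 */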

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
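
/* Usage sketch: a callback folds something over every SSA def and returns
 * true to keep iterating (see index_ssa_def_cb() further down for an
 * in-tree example).  Counting the defs of an instruction might look like:
 *
 *    static bool
 *    count_def_cb(nir_ssa_def *def, void *state)
 *    {
 *       (*(unsigned *)state)++;
 *       return true;
 *    }
 *
 *    unsigned count = 0;
 *    nir_foreach_ssa_def(instr, count_def_cb, &count);
 */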

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

int64_t
nir_src_comp_as_int(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1:  return -(int)load->value.b[comp];
   case 8:  return load->value.i8[comp];
   case 16: return load->value.i16[comp];
   case 32: return load->value.i32[comp];
   case 64: return load->value.i64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

uint64_t
nir_src_comp_as_uint(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 1:  return load->value.b[comp];
   case 8:  return load->value.u8[comp];
   case 16: return load->value.u16[comp];
   case 32: return load->value.u32[comp];
   case 64: return load->value.u64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

bool
nir_src_comp_as_bool(nir_src src, unsigned comp)
{
   int64_t i = nir_src_comp_as_int(src, comp);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

double
nir_src_comp_as_float(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 16: return _mesa_half_to_float(load->value.u16[comp]);
   case 32: return load->value.f32[comp];
   case 64: return load->value.f64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

int64_t
nir_src_as_int(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_int(src, 0);
}

uint64_t
nir_src_as_uint(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_uint(src, 0);
}

bool
nir_src_as_bool(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_bool(src, 0);
}

double
nir_src_as_float(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_float(src, 0);
}
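
/* Usage sketch: the usual constant-folding pattern is to test first, then
 * read the value; nir_src_is_const() is declared in nir.h.
 *
 *    if (nir_src_is_const(offset_src)) {
 *       uint64_t offset = nir_src_as_uint(offset_src);
 *       ...
 *    }
 */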

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}

/**
 * Returns true if the source is known to be dynamically uniform. Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it cannot be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
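
/* Usage sketch for the fixup pattern described above, clamping a result
 * `def` after the fact.  The fsat is the (hypothetical) fixup code; passing
 * it as after_me keeps its own use of `def` intact.  nir_src_for_ssa() and
 * nir_after_instr() are from nir.h.
 *
 *    nir_alu_instr *sat = nir_alu_instr_create(shader, nir_op_fsat);
 *    sat->src[0].src = nir_src_for_ssa(def);
 *    nir_ssa_dest_init(&sat->instr, &sat->dest.dest,
 *                      def->num_components, def->bit_size, NULL);
 *    sat->dest.write_mask = (1 << def->num_components) - 1;
 *    nir_instr_insert(nir_after_instr(def->parent_instr), &sat->instr);
 *    nir_ssa_def_rewrite_uses_after(def,
 *                                   nir_src_for_ssa(&sat->dest.dest.ssa),
 *                                   &sat->instr);
 */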

nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps attribute locations if they are doubles.
 * Not needed for Vulkan because of the difference in input location counts
 * for doubles between Vulkan and OpenGL.
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering.  The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
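
/* Worked example: if dual_slot == 0x2 (a dual-slot attribute at original
 * location 1), a variable at original location 0 keeps its slot, while one
 * at original location 3 moves to
 * 3 + util_bitcount(0x2 & BITFIELD64_MASK(3)) = 4.
 */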

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}