nir: rename nir_var_function to nir_var_function_temp
src/compiler/nir/nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_function_temp:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_shader_temp:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_ubo:
   case nir_var_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_function_temp;

   nir_function_impl_add_variable(impl, var);

   return var;
}
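
/* Example (illustrative sketch, not part of this file): creating one shader
 * input and one function-local temporary.  glsl_vec4_type() comes from
 * nir_types.h; "shader" and "impl" are assumed to be in scope.
 *
 *    nir_variable *in_pos =
 *       nir_variable_create(shader, nir_var_shader_in,
 *                           glsl_vec4_type(), "in_pos");
 *    in_pos->data.location = VERT_ATTRIB_GENERIC0;
 *
 *    nir_variable *tmp =
 *       nir_local_variable_create(impl, glsl_vec4_type(), "tmp");
 */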

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;
   func->is_entrypoint = false;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
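
/* Example (sketch, not part of the original file): nir_src_copy() only fills
 * in the nir_src struct; it does not register a use.  Once the instruction is
 * in the IR, use nir_instr_rewrite_src() so the use lists stay coherent:
 *
 *    nir_src_copy(&intrin->src[0], &old_src, intrin);   // before insertion
 *    nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
 *                          nir_src_for_ssa(def));       // after insertion
 */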

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}
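
/* Example (illustrative sketch, not part of this file): the usual way to get
 * a shader you can add code to is shader -> function -> impl; "options" is an
 * assumed nir_shader_compiler_options pointer.
 *
 *    nir_shader *s =
 *       nir_shader_create(NULL, MESA_SHADER_FRAGMENT, options, NULL);
 *    nir_function *fn = nir_function_create(s, "main");
 *    fn->is_entrypoint = true;
 *    nir_function_impl *impl = nir_function_impl_create(fn);
 *
 * nir_function_impl_create() gives the impl a start and end block, so
 * nir_start_block(impl) is immediately valid as an insertion point.
 */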

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_pointer_set_create(block);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation? That way it
    * doesn't get allocated for shader refs that never run
    * nir_calc_dominance? For example, the state tracker creates an
    * initial IR, clones that, runs appropriate lowering passes, and passes
    * it to the driver, which does common lowering/opt and then stores a ref
    * which is later used to do state-specific lowering and further
    * opt. Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_pointer_set_create(block);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
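
/* Example (sketch, not part of the original file): hand-rolling an ALU
 * instruction.  Most passes use nir_builder instead, but the raw steps are;
 * "a" and "b" are assumed to be existing nir_ssa_def pointers:
 *
 *    nir_alu_instr *add = nir_alu_instr_create(shader, nir_op_fadd);
 *    add->src[0].src = nir_src_for_ssa(a);
 *    add->src[1].src = nir_src_for_ssa(b);
 *    nir_ssa_dest_init(&add->instr, &add->dest.dest,
 *                      a->num_components, a->bit_size, NULL);
 *    add->dest.write_mask = (1 << a->num_components) - 1;
 *
 * The instruction still has to be inserted with nir_instr_insert() (below)
 * before its sources and destination are registered in the use/def lists.
 */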

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}
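
/* Example (sketch, not part of the original file): a lowering pass that
 * replaces a projector might add the new coordinate and drop the old source;
 * "new_coord" is an assumed nir_ssa_def pointer:
 *
 *    nir_tex_instr_add_src(tex, nir_tex_src_coord,
 *                          nir_src_for_ssa(new_coord));
 *
 *    int proj_idx = nir_tex_instr_src_index(tex, nir_tex_src_projector);
 *    if (proj_idx >= 0)
 *       nir_tex_instr_remove_src(tex, proj_idx);
 */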

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 16: v.u16[0] = _mesa_float_to_half(d); break;
   case 32: v.f32[0] = d; break;
   case 64: v.f64[0] = d; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 1: v.b[0] = i & 1; break;
   case 8: v.i8[0] = i; break;
   case 16: v.i16[0] = i; break;
   case 32: v.i32[0] = i; break;
   case 64: v.i64[0] = i; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block. After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction. Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
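
/* Example (illustrative, not from this file): cursor reduction is what lets
 * nir_cursors_equal() see through different spellings of the same location.
 * For a block b whose last instruction is "last":
 *
 *    nir_cursors_equal(nir_after_instr(last), nir_after_block(b)) == true
 *
 * because "after the last instruction" reduces to "after the block".
 */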

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
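
/* Example (sketch, not part of the original file): finishing the fadd from
 * the nir_alu_instr_create() example above by inserting it; the cursor
 * helpers (nir_before_block(), nir_after_instr(), ...) live in nir.h:
 *
 *    nir_instr_insert(nir_after_instr(b->parent_instr), &add->instr);
 *
 * Insertion is also the point where add_defs_uses() registers the new
 * instruction's sources and destination in the use/def lists.
 */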

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

int64_t
nir_src_comp_as_int(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1: return -(int)load->value.b[comp];
   case 8: return load->value.i8[comp];
   case 16: return load->value.i16[comp];
   case 32: return load->value.i32[comp];
   case 64: return load->value.i64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

uint64_t
nir_src_comp_as_uint(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 1: return load->value.b[comp];
   case 8: return load->value.u8[comp];
   case 16: return load->value.u16[comp];
   case 32: return load->value.u32[comp];
   case 64: return load->value.u64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

bool
nir_src_comp_as_bool(nir_src src, unsigned comp)
{
   int64_t i = nir_src_comp_as_int(src, comp);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

double
nir_src_comp_as_float(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 16: return _mesa_half_to_float(load->value.u16[comp]);
   case 32: return load->value.f32[comp];
   case 64: return load->value.f64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

int64_t
nir_src_as_int(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_int(src, 0);
}

uint64_t
nir_src_as_uint(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_uint(src, 0);
}

bool
nir_src_as_bool(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_bool(src, 0);
}

double
nir_src_as_float(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_float(src, 0);
}
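
/* Example (sketch, not part of the original file): the usual pattern in an
 * optimization pass is to guard the typed getters with nir_src_is_const():
 *
 *    if (nir_src_is_const(intrin->src[1])) {
 *       uint64_t offset = nir_src_as_uint(intrin->src[1]);
 *       ...fold the constant offset...
 *    }
 */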

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}

/**
 * Returns true if the source is known to be dynamically uniform. Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but that cannot be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
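
/* Example (sketch, not part of the original file): the classic "replace an
 * instruction" pattern in an optimization pass; "new_def" is an assumed
 * replacement nir_ssa_def pointer:
 *
 *    nir_ssa_def_rewrite_uses(&old_alu->dest.dest.ssa,
 *                             nir_src_for_ssa(new_def));
 *    nir_instr_remove(&old_alu->instr);
 */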

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction. This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
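
/* Example (sketch, not part of the original file): clamping the result of an
 * existing instruction without rewriting the clamp's own use of it, using
 * nir_builder with "b" assumed to be positioned right after def:
 *
 *    nir_ssa_def *clamped = nir_fsat(b, def);
 *    nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(clamped),
 *                                   clamped->parent_instr);
 *
 * Every pre-existing use of def now reads clamped, while the fsat itself
 * still reads the original def.
 */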

nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}
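
/* Example (illustrative, not from this file): the top-to-bottom property
 * gives a cheap necessary condition for dominance after nir_index_ssa_defs():
 *
 *    if (a->index > b->index)
 *       return false;   // a definitely does not dominate b
 *
 * The converse does not hold; a->index <= b->index alone does not prove
 * that a dominates b.
 */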

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps attribute locations if the attributes
 * are doubles. Not needed for Vulkan because Vulkan and OpenGL count input
 * locations for doubles differently.
 *
 * The bitfield returned in dual_slot has one bit for each double input slot
 * in the original OpenGL single-slot input numbering. The mapping from old
 * locations to new locations is:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
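
/* Worked example (illustrative, not from this file): with dual_slot = 0x2,
 * i.e. a single double-type attribute at old location 1, the formula above
 * leaves locations 0 and 1 alone and shifts every later location up by one:
 *
 *    new_loc(0) = 0 + util_bitcount(0x2 & BITFIELD64_MASK(0)) = 0
 *    new_loc(1) = 1 + util_bitcount(0x2 & BITFIELD64_MASK(1)) = 1
 *    new_loc(2) = 2 + util_bitcount(0x2 & BITFIELD64_MASK(2)) = 3
 */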

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}