nir: Add a ptr_as_array deref type
src/compiler/nir/nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_ubo:
   case nir_var_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}
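
/* A hypothetical usage sketch for the helper above: declaring a vec4
 * fragment shader output and assigning it a location.  glsl_vec4_type()
 * comes from compiler/glsl_types.h and FRAG_RESULT_DATA0 from
 * shader_enums.h; the name "f_color" is made up for illustration. */
static nir_variable *
example_create_color_output(nir_shader *shader)
{
   nir_variable *color =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_vec4_type(), "f_color");
   color->data.location = FRAG_RESULT_DATA0;
   return color;
}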

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}
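
/* A minimal sketch of the usual pairing: create a named function, then give
 * it an implementation, which also sets up the start and end blocks.  The
 * "main" name is illustrative. */
static nir_function_impl *
example_create_main(nir_shader *shader)
{
   nir_function *func = nir_function_create(shader, "main");
   return nir_function_impl_create(func);
}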

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}
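
/* A hand-rolled sketch of building the new ptr_as_array deref this commit
 * introduces, without going through nir_builder.  A ptr_as_array deref
 * treats its parent pointer like C's ptr[i]: it keeps the parent's mode and
 * type and adds an index source.  nir_src_for_ssa() and nir_ssa_dest_init()
 * come from nir.h, and the instruction still has to be placed with
 * nir_instr_insert() afterwards; the helper name is hypothetical. */
static nir_deref_instr *
example_ptr_as_array_deref(nir_shader *shader, nir_deref_instr *parent,
                           nir_ssa_def *index)
{
   nir_deref_instr *deref =
      nir_deref_instr_create(shader, nir_deref_type_ptr_as_array);

   /* Indexing a pointer does not change what it points at. */
   deref->mode = parent->mode;
   deref->type = parent->type;
   deref->parent = nir_src_for_ssa(&parent->dest.ssa);
   deref->arr.index = nir_src_for_ssa(index);

   /* The result is a pointer of the same shape as the parent. */
   nir_ssa_dest_init(&deref->instr, &deref->dest,
                     parent->dest.ssa.num_components,
                     parent->dest.ssa.bit_size, NULL);

   return deref;
}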

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}
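
/* Hypothetical usage: a lowering pass forcing an explicit LOD of zero onto
 * an already-inserted texture instruction.  This assumes nir_builder.h is
 * available for the nir_builder type and nir_imm_int(); the helper name is
 * made up for illustration. */
static void
example_force_lod_zero(nir_builder *b, nir_tex_instr *tex)
{
   /* The immediate has to be emitted before the instruction that uses it. */
   b->cursor = nir_before_instr(&tex->instr);
   nir_tex_instr_add_src(tex, nir_tex_src_lod,
                         nir_src_for_ssa(nir_imm_int(b, 0)));
}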

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 16: v.u16[0] = _mesa_float_to_half(d); break;
   case 32: v.f32[0] = d; break;
   case 64: v.f64[0] = d; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 1:  v.b[0]   = i & 1; break;
   case 8:  v.i8[0]  = i; break;
   case 16: v.i16[0] = i; break;
   case 32: v.i32[0] = i; break;
   case 64: v.i64[0] = i; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}
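
/* A minimal sketch of why these identities matter: a reduction can seed its
 * accumulator with the identity so the first real element folds in with the
 * same binop as every later one (0 for iadd, INT_MAX for imin, and so on).
 * This hypothetical helper materializes the identity as a load_const using
 * only functions defined in this file. */
static nir_load_const_instr *
example_identity_load_const(nir_shader *shader, nir_op binop,
                            unsigned bit_size)
{
   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, 1, bit_size);
   load->value = nir_alu_binop_identity(binop, bit_size);
   return load;
}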

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
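
/* Hypothetical usage of the cursor API above: prepend a fresh 32-bit scalar
 * ssa_undef to a block.  nir_before_block() is one of the cursor
 * constructors in nir.h; insertion is what wires up the use/def lists. */
static nir_ssa_def *
example_prepend_undef(nir_shader *shader, nir_block *block)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(shader, 1, 32);
   nir_instr_insert(nir_before_block(block), &undef->instr);
   return &undef->def;
}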

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
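
/* A small sketch of the visitor protocol used throughout this file:
 * callbacks return true to keep iterating and false to stop early.  This
 * hypothetical helper counts every source of an instruction, including
 * register indirects, which visit_src() also hands to the callback. */
static bool
count_src_cb(nir_src *src, void *state)
{
   (*(unsigned *)state)++;
   return true; /* keep going */
}

static unsigned
example_count_srcs(nir_instr *instr)
{
   unsigned count = 0;
   nir_foreach_src(instr, count_src_cb, &count);
   return count;
}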

int64_t
nir_src_comp_as_int(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1:  return -(int)load->value.b[comp];
   case 8:  return load->value.i8[comp];
   case 16: return load->value.i16[comp];
   case 32: return load->value.i32[comp];
   case 64: return load->value.i64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

uint64_t
nir_src_comp_as_uint(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 1:  return load->value.b[comp];
   case 8:  return load->value.u8[comp];
   case 16: return load->value.u16[comp];
   case 32: return load->value.u32[comp];
   case 64: return load->value.u64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

bool
nir_src_comp_as_bool(nir_src src, unsigned comp)
{
   int64_t i = nir_src_comp_as_int(src, comp);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

double
nir_src_comp_as_float(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 16: return _mesa_half_to_float(load->value.u16[comp]);
   case 32: return load->value.f32[comp];
   case 64: return load->value.f64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

int64_t
nir_src_as_int(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_int(src, 0);
}

uint64_t
nir_src_as_uint(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_uint(src, 0);
}

bool
nir_src_as_bool(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_bool(src, 0);
}

double
nir_src_as_float(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_float(src, 0);
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
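
/* Sketch of how a pass might combine the constant helpers above, e.g. to
 * spot a scalar multiply by an immediate power of two that could become a
 * shift.  The helper is hypothetical; nir_src_is_const() and
 * nir_src_num_components() come from nir.h. */
static bool
example_is_const_pot(nir_src src)
{
   if (!nir_src_is_const(src) || nir_src_num_components(src) != 1)
      return false;

   uint64_t v = nir_src_as_uint(src);
   return v != 0 && (v & (v - 1)) == 0; /* exactly one bit set */
}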

/**
 * Returns true if the source is known to be dynamically uniform.  Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it cannot be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
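
/* A sketch of the fixup pattern described in the comment above, assuming
 * nir_builder.h for the builder type, nir_fmin(), and nir_imm_float():
 * clamp every existing use of `def` to 1.0 without rewriting the clamp
 * itself.  The helper name is hypothetical. */
static void
example_clamp_uses(nir_builder *b, nir_ssa_def *def)
{
   b->cursor = nir_after_instr(def->parent_instr);
   nir_ssa_def *clamped = nir_fmin(b, def, nir_imm_float(b, 1.0f));
   /* The fmin reads def, so pass it as after_me to leave it untouched. */
   nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(clamped),
                                  clamped->parent_instr);
}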

nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if?  Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}
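
/* Sketch: nir_block_cf_tree_next() is the primitive behind whole-impl block
 * walks; the nir_foreach_block() macro in nir.h expands to essentially this
 * loop.  The counting helper is hypothetical. */
static unsigned
example_count_blocks(nir_function_impl *impl)
{
   unsigned count = 0;
   for (nir_block *block = nir_start_block(impl); block != NULL;
        block = nir_block_cf_tree_next(block))
      count++;
   return count;
}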

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else?  Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps attribute locations when the attributes
 * are doubles.  Not needed for Vulkan because of the difference in input
 * location counts for doubles between Vulkan and OpenGL.
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering.  The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
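
/* Worked example of the remap formula above: suppose inputs sit at
 * locations 0, 1, and 2 and the variable at location 1 is a dvec4 (one
 * dual slot), so dual_slot == 0x2.  Location 0 keeps index 0, location 1
 * keeps index 1 (but now covers slots 1 and 2), and location 2 becomes
 * 2 + util_bitcount(0x2 & BITFIELD64_MASK(2)) == 3. */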

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}
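
/* Worked example: continuing the remap above (dual_slot == 0x2), a
 * remapped attribute mask 0b1011 (slots 0, 1, and 3, where the dvec4
 * covers slots 1-2) compacts back to 0b111: loc = 1, mask = 0x3, and
 * (0b1011 & 0x3) | ((0b1011 & ~0x3) >> 1) == 0b011 | 0b100. */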