nir: Get rid of global registers
[mesa.git] / src / compiler / nir / nir.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include "nir_control_flow_private.h"
30 #include "util/half_float.h"
31 #include <limits.h>
32 #include <assert.h>
33 #include <math.h>
34 #include "util/u_math.h"
35
36 #include "main/menums.h" /* BITFIELD64_MASK */
37
38 nir_shader *
39 nir_shader_create(void *mem_ctx,
40 gl_shader_stage stage,
41 const nir_shader_compiler_options *options,
42 shader_info *si)
43 {
44 nir_shader *shader = rzalloc(mem_ctx, nir_shader);
45
46 exec_list_make_empty(&shader->uniforms);
47 exec_list_make_empty(&shader->inputs);
48 exec_list_make_empty(&shader->outputs);
49 exec_list_make_empty(&shader->shared);
50
51 shader->options = options;
52
53 if (si) {
54 assert(si->stage == stage);
55 shader->info = *si;
56 } else {
57 shader->info.stage = stage;
58 }
59
60 exec_list_make_empty(&shader->functions);
61 exec_list_make_empty(&shader->globals);
62 exec_list_make_empty(&shader->system_values);
63
64 shader->num_inputs = 0;
65 shader->num_outputs = 0;
66 shader->num_uniforms = 0;
67 shader->num_shared = 0;
68
69 return shader;
70 }
71
72 static nir_register *
73 reg_create(void *mem_ctx, struct exec_list *list)
74 {
75 nir_register *reg = ralloc(mem_ctx, nir_register);
76
77 list_inithead(&reg->uses);
78 list_inithead(&reg->defs);
79 list_inithead(&reg->if_uses);
80
81 reg->num_components = 0;
82 reg->bit_size = 32;
83 reg->num_array_elems = 0;
84 reg->name = NULL;
85
86 exec_list_push_tail(list, &reg->node);
87
88 return reg;
89 }
90
91 nir_register *
92 nir_local_reg_create(nir_function_impl *impl)
93 {
94 nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
95 reg->index = impl->reg_alloc++;
96
97 return reg;
98 }
99
100 void
101 nir_reg_remove(nir_register *reg)
102 {
103 exec_node_remove(&reg->node);
104 }
105
106 void
107 nir_shader_add_variable(nir_shader *shader, nir_variable *var)
108 {
109 switch (var->data.mode) {
110 case nir_var_all:
111 assert(!"invalid mode");
112 break;
113
114 case nir_var_function_temp:
115 assert(!"nir_shader_add_variable cannot be used for local variables");
116 break;
117
118 case nir_var_shader_temp:
119 exec_list_push_tail(&shader->globals, &var->node);
120 break;
121
122 case nir_var_shader_in:
123 exec_list_push_tail(&shader->inputs, &var->node);
124 break;
125
126 case nir_var_shader_out:
127 exec_list_push_tail(&shader->outputs, &var->node);
128 break;
129
130 case nir_var_uniform:
131 case nir_var_mem_ubo:
132 case nir_var_mem_ssbo:
133 exec_list_push_tail(&shader->uniforms, &var->node);
134 break;
135
136 case nir_var_mem_shared:
137 assert(gl_shader_stage_is_compute(shader->info.stage));
138 exec_list_push_tail(&shader->shared, &var->node);
139 break;
140
141 case nir_var_mem_global:
142 assert(!"nir_shader_add_variable cannot be used for global memory");
143 break;
144
145 case nir_var_system_value:
146 exec_list_push_tail(&shader->system_values, &var->node);
147 break;
148 }
149 }
150
151 nir_variable *
152 nir_variable_create(nir_shader *shader, nir_variable_mode mode,
153 const struct glsl_type *type, const char *name)
154 {
155 nir_variable *var = rzalloc(shader, nir_variable);
156 var->name = ralloc_strdup(var, name);
157 var->type = type;
158 var->data.mode = mode;
159 var->data.how_declared = nir_var_declared_normally;
160
161 if ((mode == nir_var_shader_in &&
162 shader->info.stage != MESA_SHADER_VERTEX) ||
163 (mode == nir_var_shader_out &&
164 shader->info.stage != MESA_SHADER_FRAGMENT))
165 var->data.interpolation = INTERP_MODE_SMOOTH;
166
167 if (mode == nir_var_shader_in || mode == nir_var_uniform)
168 var->data.read_only = true;
169
170 nir_shader_add_variable(shader, var);
171
172 return var;
173 }
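
/* Usage sketch (illustrative, not part of the API contract): creating a
 * fragment-shader color output could look like
 *
 *    nir_variable *color =
 *       nir_variable_create(shader, nir_var_shader_out,
 *                           glsl_vec4_type(), "out_color");
 *    color->data.location = FRAG_RESULT_DATA0;
 *
 * where glsl_vec4_type() comes from the GLSL type C API.
 */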
174
175 nir_variable *
176 nir_local_variable_create(nir_function_impl *impl,
177 const struct glsl_type *type, const char *name)
178 {
179 nir_variable *var = rzalloc(impl->function->shader, nir_variable);
180 var->name = ralloc_strdup(var, name);
181 var->type = type;
182 var->data.mode = nir_var_function_temp;
183
184 nir_function_impl_add_variable(impl, var);
185
186 return var;
187 }
188
189 nir_function *
190 nir_function_create(nir_shader *shader, const char *name)
191 {
192 nir_function *func = ralloc(shader, nir_function);
193
194 exec_list_push_tail(&shader->functions, &func->node);
195
196 func->name = ralloc_strdup(func, name);
197 func->shader = shader;
198 func->num_params = 0;
199 func->params = NULL;
200 func->impl = NULL;
201 func->is_entrypoint = false;
202
203 return func;
204 }
205
206 /* NOTE: if the instruction you are copying a src to is already added
207 * to the IR, use nir_instr_rewrite_src() instead.
208 */
209 void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
210 {
211 dest->is_ssa = src->is_ssa;
212 if (src->is_ssa) {
213 dest->ssa = src->ssa;
214 } else {
215 dest->reg.base_offset = src->reg.base_offset;
216 dest->reg.reg = src->reg.reg;
217 if (src->reg.indirect) {
218 dest->reg.indirect = ralloc(mem_ctx, nir_src);
219 nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
220 } else {
221 dest->reg.indirect = NULL;
222 }
223 }
224 }
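
/* A minimal sketch of the note above (alu, some_src, cursor and new_src are
 * illustrative names): an instruction that has not been inserted yet may use
 * nir_src_copy(), while one that is already in the IR must go through
 * nir_instr_rewrite_src() so the use lists stay consistent:
 *
 *    nir_src_copy(&alu->src[0].src, &some_src, &alu->instr); // not in IR yet
 *    nir_instr_insert(cursor, &alu->instr);
 *    nir_instr_rewrite_src(&alu->instr, &alu->src[0].src, new_src);
 */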
225
226 void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
227 {
228 /* Copying an SSA definition makes no sense whatsoever. */
229 assert(!src->is_ssa);
230
231 dest->is_ssa = false;
232
233 dest->reg.base_offset = src->reg.base_offset;
234 dest->reg.reg = src->reg.reg;
235 if (src->reg.indirect) {
236 dest->reg.indirect = ralloc(instr, nir_src);
237 nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
238 } else {
239 dest->reg.indirect = NULL;
240 }
241 }
242
243 void
244 nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
245 nir_alu_instr *instr)
246 {
247 nir_src_copy(&dest->src, &src->src, &instr->instr);
248 dest->abs = src->abs;
249 dest->negate = src->negate;
250 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
251 dest->swizzle[i] = src->swizzle[i];
252 }
253
254 void
255 nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
256 nir_alu_instr *instr)
257 {
258 nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
259 dest->write_mask = src->write_mask;
260 dest->saturate = src->saturate;
261 }
262
263
264 static void
265 cf_init(nir_cf_node *node, nir_cf_node_type type)
266 {
267 exec_node_init(&node->node);
268 node->parent = NULL;
269 node->type = type;
270 }
271
272 nir_function_impl *
273 nir_function_impl_create_bare(nir_shader *shader)
274 {
275 nir_function_impl *impl = ralloc(shader, nir_function_impl);
276
277 impl->function = NULL;
278
279 cf_init(&impl->cf_node, nir_cf_node_function);
280
281 exec_list_make_empty(&impl->body);
282 exec_list_make_empty(&impl->registers);
283 exec_list_make_empty(&impl->locals);
284 impl->reg_alloc = 0;
285 impl->ssa_alloc = 0;
286 impl->valid_metadata = nir_metadata_none;
287
288 /* create start & end blocks */
289 nir_block *start_block = nir_block_create(shader);
290 nir_block *end_block = nir_block_create(shader);
291 start_block->cf_node.parent = &impl->cf_node;
292 end_block->cf_node.parent = &impl->cf_node;
293 impl->end_block = end_block;
294
295 exec_list_push_tail(&impl->body, &start_block->cf_node.node);
296
297 start_block->successors[0] = end_block;
298 _mesa_set_add(end_block->predecessors, start_block);
299 return impl;
300 }
301
302 nir_function_impl *
303 nir_function_impl_create(nir_function *function)
304 {
305 assert(function->impl == NULL);
306
307 nir_function_impl *impl = nir_function_impl_create_bare(function->shader);
308
309 function->impl = impl;
310 impl->function = function;
311
312 return impl;
313 }
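
/* Usage sketch (illustrative): a shader entrypoint is typically set up as
 *
 *    nir_function *fn = nir_function_create(shader, "main");
 *    fn->is_entrypoint = true;
 *    nir_function_impl *impl = nir_function_impl_create(fn);
 *
 * after which instructions can be inserted into impl's start block.
 */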
314
315 nir_block *
316 nir_block_create(nir_shader *shader)
317 {
318 nir_block *block = rzalloc(shader, nir_block);
319
320 cf_init(&block->cf_node, nir_cf_node_block);
321
322 block->successors[0] = block->successors[1] = NULL;
323 block->predecessors = _mesa_pointer_set_create(block);
324 block->imm_dom = NULL;
325 /* XXX maybe it would be worth it to defer allocation? That
326 * way it wouldn't get allocated for shader refs that never run
327 * nir_calc_dominance. For example, the state-tracker creates an
328 * initial IR, clones it, runs the appropriate lowering passes, passes
329 * it to the driver, which does common lowering/opt, and then stores a
330 * ref that is later used to do state-specific lowering and further
331 * opt. Do any of the references not need dominance metadata?
332 */
333 block->dom_frontier = _mesa_pointer_set_create(block);
334
335 exec_list_make_empty(&block->instr_list);
336
337 return block;
338 }
339
340 static inline void
341 src_init(nir_src *src)
342 {
343 src->is_ssa = false;
344 src->reg.reg = NULL;
345 src->reg.indirect = NULL;
346 src->reg.base_offset = 0;
347 }
348
349 nir_if *
350 nir_if_create(nir_shader *shader)
351 {
352 nir_if *if_stmt = ralloc(shader, nir_if);
353
354 if_stmt->control = nir_selection_control_none;
355
356 cf_init(&if_stmt->cf_node, nir_cf_node_if);
357 src_init(&if_stmt->condition);
358
359 nir_block *then = nir_block_create(shader);
360 exec_list_make_empty(&if_stmt->then_list);
361 exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
362 then->cf_node.parent = &if_stmt->cf_node;
363
364 nir_block *else_stmt = nir_block_create(shader);
365 exec_list_make_empty(&if_stmt->else_list);
366 exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
367 else_stmt->cf_node.parent = &if_stmt->cf_node;
368
369 return if_stmt;
370 }
371
372 nir_loop *
373 nir_loop_create(nir_shader *shader)
374 {
375 nir_loop *loop = rzalloc(shader, nir_loop);
376
377 cf_init(&loop->cf_node, nir_cf_node_loop);
378
379 nir_block *body = nir_block_create(shader);
380 exec_list_make_empty(&loop->body);
381 exec_list_push_tail(&loop->body, &body->cf_node.node);
382 body->cf_node.parent = &loop->cf_node;
383
384 body->successors[0] = body;
385 _mesa_set_add(body->predecessors, body);
386
387 return loop;
388 }
389
390 static void
391 instr_init(nir_instr *instr, nir_instr_type type)
392 {
393 instr->type = type;
394 instr->block = NULL;
395 exec_node_init(&instr->node);
396 }
397
398 static void
399 dest_init(nir_dest *dest)
400 {
401 dest->is_ssa = false;
402 dest->reg.reg = NULL;
403 dest->reg.indirect = NULL;
404 dest->reg.base_offset = 0;
405 }
406
407 static void
408 alu_dest_init(nir_alu_dest *dest)
409 {
410 dest_init(&dest->dest);
411 dest->saturate = false;
412 dest->write_mask = 0xf;
413 }
414
415 static void
416 alu_src_init(nir_alu_src *src)
417 {
418 src_init(&src->src);
419 src->abs = src->negate = false;
420 for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
421 src->swizzle[i] = i;
422 }
423
424 nir_alu_instr *
425 nir_alu_instr_create(nir_shader *shader, nir_op op)
426 {
427 unsigned num_srcs = nir_op_infos[op].num_inputs;
428 /* TODO: don't use rzalloc */
429 nir_alu_instr *instr =
430 rzalloc_size(shader,
431 sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
432
433 instr_init(&instr->instr, nir_instr_type_alu);
434 instr->op = op;
435 alu_dest_init(&instr->dest);
436 for (unsigned i = 0; i < num_srcs; i++)
437 alu_src_init(&instr->src[i]);
438
439 return instr;
440 }
441
442 nir_deref_instr *
443 nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
444 {
445 nir_deref_instr *instr =
446 rzalloc_size(shader, sizeof(nir_deref_instr));
447
448 instr_init(&instr->instr, nir_instr_type_deref);
449
450 instr->deref_type = deref_type;
451 if (deref_type != nir_deref_type_var)
452 src_init(&instr->parent);
453
454 if (deref_type == nir_deref_type_array ||
455 deref_type == nir_deref_type_ptr_as_array)
456 src_init(&instr->arr.index);
457
458 dest_init(&instr->dest);
459
460 return instr;
461 }
462
463 nir_jump_instr *
464 nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
465 {
466 nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
467 instr_init(&instr->instr, nir_instr_type_jump);
468 instr->type = type;
469 return instr;
470 }
471
472 nir_load_const_instr *
473 nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
474 unsigned bit_size)
475 {
476 nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
477 instr_init(&instr->instr, nir_instr_type_load_const);
478
479 nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);
480
481 return instr;
482 }
483
484 nir_intrinsic_instr *
485 nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
486 {
487 unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
488 /* TODO: don't use rzalloc */
489 nir_intrinsic_instr *instr =
490 rzalloc_size(shader,
491 sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));
492
493 instr_init(&instr->instr, nir_instr_type_intrinsic);
494 instr->intrinsic = op;
495
496 if (nir_intrinsic_infos[op].has_dest)
497 dest_init(&instr->dest);
498
499 for (unsigned i = 0; i < num_srcs; i++)
500 src_init(&instr->src[i]);
501
502 return instr;
503 }
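
/* Usage sketch (illustrative; block_index and offset stand for existing
 * nir_ssa_def pointers): the source count is fixed by nir_intrinsic_infos,
 * so the caller only fills in sources, destination and indices:
 *
 *    nir_intrinsic_instr *load =
 *       nir_intrinsic_instr_create(shader, nir_intrinsic_load_ubo);
 *    load->num_components = 4;
 *    load->src[0] = nir_src_for_ssa(block_index);
 *    load->src[1] = nir_src_for_ssa(offset);
 *    nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
 */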
504
505 nir_call_instr *
506 nir_call_instr_create(nir_shader *shader, nir_function *callee)
507 {
508 const unsigned num_params = callee->num_params;
509 nir_call_instr *instr =
510 rzalloc_size(shader, sizeof(*instr) +
511 num_params * sizeof(instr->params[0]));
512
513 instr_init(&instr->instr, nir_instr_type_call);
514 instr->callee = callee;
515 instr->num_params = num_params;
516 for (unsigned i = 0; i < num_params; i++)
517 src_init(&instr->params[i]);
518
519 return instr;
520 }
521
522 static int8_t default_tg4_offsets[4][2] =
523 {
524 { 0, 1 },
525 { 1, 1 },
526 { 1, 0 },
527 { 0, 0 },
528 };
529
530 nir_tex_instr *
531 nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
532 {
533 nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
534 instr_init(&instr->instr, nir_instr_type_tex);
535
536 dest_init(&instr->dest);
537
538 instr->num_srcs = num_srcs;
539 instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
540 for (unsigned i = 0; i < num_srcs; i++)
541 src_init(&instr->src[i].src);
542
543 instr->texture_index = 0;
544 instr->texture_array_size = 0;
545 instr->sampler_index = 0;
546 memcpy(instr->tg4_offsets, default_tg4_offsets, sizeof(instr->tg4_offsets));
547
548 return instr;
549 }
550
551 void
552 nir_tex_instr_add_src(nir_tex_instr *tex,
553 nir_tex_src_type src_type,
554 nir_src src)
555 {
556 nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
557 tex->num_srcs + 1);
558
559 for (unsigned i = 0; i < tex->num_srcs; i++) {
560 new_srcs[i].src_type = tex->src[i].src_type;
561 nir_instr_move_src(&tex->instr, &new_srcs[i].src,
562 &tex->src[i].src);
563 }
564
565 ralloc_free(tex->src);
566 tex->src = new_srcs;
567
568 tex->src[tex->num_srcs].src_type = src_type;
569 nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
570 tex->num_srcs++;
571 }
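
/* Usage sketch (illustrative; cmp stands for an existing nir_ssa_def *):
 * a lowering pass can append a comparator source after the fact:
 *
 *    nir_tex_instr_add_src(tex, nir_tex_src_comparator,
 *                          nir_src_for_ssa(cmp));
 */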
572
573 void
574 nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
575 {
576 assert(src_idx < tex->num_srcs);
577
578 /* First rewrite the source to NIR_SRC_INIT */
579 nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);
580
581 /* Now, move all of the other sources down */
582 for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
583 tex->src[i-1].src_type = tex->src[i].src_type;
584 nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
585 }
586 tex->num_srcs--;
587 }
588
589 bool
590 nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex)
591 {
592 if (tex->op != nir_texop_tg4)
593 return false;
594 return memcmp(tex->tg4_offsets, default_tg4_offsets,
595 sizeof(tex->tg4_offsets)) != 0;
596 }
597
598 nir_phi_instr *
599 nir_phi_instr_create(nir_shader *shader)
600 {
601 nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
602 instr_init(&instr->instr, nir_instr_type_phi);
603
604 dest_init(&instr->dest);
605 exec_list_make_empty(&instr->srcs);
606 return instr;
607 }
608
609 nir_parallel_copy_instr *
610 nir_parallel_copy_instr_create(nir_shader *shader)
611 {
612 nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
613 instr_init(&instr->instr, nir_instr_type_parallel_copy);
614
615 exec_list_make_empty(&instr->entries);
616
617 return instr;
618 }
619
620 nir_ssa_undef_instr *
621 nir_ssa_undef_instr_create(nir_shader *shader,
622 unsigned num_components,
623 unsigned bit_size)
624 {
625 nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
626 instr_init(&instr->instr, nir_instr_type_ssa_undef);
627
628 nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);
629
630 return instr;
631 }
632
633 static nir_const_value
634 const_value_float(double d, unsigned bit_size)
635 {
636 nir_const_value v;
637 switch (bit_size) {
638 case 16: v.u16[0] = _mesa_float_to_half(d); break;
639 case 32: v.f32[0] = d; break;
640 case 64: v.f64[0] = d; break;
641 default:
642 unreachable("Invalid bit size");
643 }
644 return v;
645 }
646
647 static nir_const_value
648 const_value_int(int64_t i, unsigned bit_size)
649 {
650 nir_const_value v;
651 switch (bit_size) {
652 case 1: v.b[0] = i & 1; break;
653 case 8: v.i8[0] = i; break;
654 case 16: v.i16[0] = i; break;
655 case 32: v.i32[0] = i; break;
656 case 64: v.i64[0] = i; break;
657 default:
658 unreachable("Invalid bit size");
659 }
660 return v;
661 }
662
663 nir_const_value
664 nir_alu_binop_identity(nir_op binop, unsigned bit_size)
665 {
666 const int64_t max_int = (1ull << (bit_size - 1)) - 1;
667 const int64_t min_int = -max_int - 1;
668 switch (binop) {
669 case nir_op_iadd:
670 return const_value_int(0, bit_size);
671 case nir_op_fadd:
672 return const_value_float(0, bit_size);
673 case nir_op_imul:
674 return const_value_int(1, bit_size);
675 case nir_op_fmul:
676 return const_value_float(1, bit_size);
677 case nir_op_imin:
678 return const_value_int(max_int, bit_size);
679 case nir_op_umin:
680 return const_value_int(~0ull, bit_size);
681 case nir_op_fmin:
682 return const_value_float(INFINITY, bit_size);
683 case nir_op_imax:
684 return const_value_int(min_int, bit_size);
685 case nir_op_umax:
686 return const_value_int(0, bit_size);
687 case nir_op_fmax:
688 return const_value_float(-INFINITY, bit_size);
689 case nir_op_iand:
690 return const_value_int(~0ull, bit_size);
691 case nir_op_ior:
692 return const_value_int(0, bit_size);
693 case nir_op_ixor:
694 return const_value_int(0, bit_size);
695 default:
696 unreachable("Invalid reduction operation");
697 }
698 }
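
/* Sanity check of the identities above (illustrative): for any x,
 * imin(x, max_int) == x, umin(x, ~0) == x, imax(x, min_int) == x and
 * iand(x, ~0) == x, which is what makes these values safe seeds for
 * reductions.
 */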
699
700 nir_function_impl *
701 nir_cf_node_get_function(nir_cf_node *node)
702 {
703 while (node->type != nir_cf_node_function) {
704 node = node->parent;
705 }
706
707 return nir_cf_node_as_function(node);
708 }
709
710 /* Reduces a cursor to a canonical form: convert "before" forms to "after"
711 * forms and go up to block granularity when possible, so that equivalent
712 * cursors reduce to the same representation. */
713 static nir_cursor
714 reduce_cursor(nir_cursor cursor)
715 {
716 switch (cursor.option) {
717 case nir_cursor_before_block:
718 assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
719 nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
720 if (exec_list_is_empty(&cursor.block->instr_list)) {
721 /* Empty block. After is as good as before. */
722 cursor.option = nir_cursor_after_block;
723 }
724 return cursor;
725
726 case nir_cursor_after_block:
727 return cursor;
728
729 case nir_cursor_before_instr: {
730 nir_instr *prev_instr = nir_instr_prev(cursor.instr);
731 if (prev_instr) {
732 /* Before this instruction is after the previous */
733 cursor.instr = prev_instr;
734 cursor.option = nir_cursor_after_instr;
735 } else {
736 /* No previous instruction. Switch to before block */
737 cursor.block = cursor.instr->block;
738 cursor.option = nir_cursor_before_block;
739 }
740 return reduce_cursor(cursor);
741 }
742
743 case nir_cursor_after_instr:
744 if (nir_instr_next(cursor.instr) == NULL) {
745 /* This is the last instruction, switch to after block */
746 cursor.option = nir_cursor_after_block;
747 cursor.block = cursor.instr->block;
748 }
749 return cursor;
750
751 default:
752 unreachable("Inavlid cursor option");
753 }
754 }
755
756 bool
757 nir_cursors_equal(nir_cursor a, nir_cursor b)
758 {
759 /* Reduced cursors should be unique */
760 a = reduce_cursor(a);
761 b = reduce_cursor(b);
762
763 return a.block == b.block && a.option == b.option;
764 }
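
/* Example (illustrative): if "last" is the final instruction of a non-empty
 * block, then nir_after_instr(last) and nir_after_block(block) reduce to the
 * same canonical cursor:
 *
 *    nir_instr *last = nir_block_last_instr(block);
 *    assert(nir_cursors_equal(nir_after_instr(last),
 *                             nir_after_block(block)));
 */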
765
766 static bool
767 add_use_cb(nir_src *src, void *state)
768 {
769 nir_instr *instr = state;
770
771 src->parent_instr = instr;
772 list_addtail(&src->use_link,
773 src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);
774
775 return true;
776 }
777
778 static bool
779 add_ssa_def_cb(nir_ssa_def *def, void *state)
780 {
781 nir_instr *instr = state;
782
783 if (instr->block && def->index == UINT_MAX) {
784 nir_function_impl *impl =
785 nir_cf_node_get_function(&instr->block->cf_node);
786
787 def->index = impl->ssa_alloc++;
788 }
789
790 return true;
791 }
792
793 static bool
794 add_reg_def_cb(nir_dest *dest, void *state)
795 {
796 nir_instr *instr = state;
797
798 if (!dest->is_ssa) {
799 dest->reg.parent_instr = instr;
800 list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
801 }
802
803 return true;
804 }
805
806 static void
807 add_defs_uses(nir_instr *instr)
808 {
809 nir_foreach_src(instr, add_use_cb, instr);
810 nir_foreach_dest(instr, add_reg_def_cb, instr);
811 nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
812 }
813
814 void
815 nir_instr_insert(nir_cursor cursor, nir_instr *instr)
816 {
817 switch (cursor.option) {
818 case nir_cursor_before_block:
819 /* Only allow inserting jumps into empty blocks. */
820 if (instr->type == nir_instr_type_jump)
821 assert(exec_list_is_empty(&cursor.block->instr_list));
822
823 instr->block = cursor.block;
824 add_defs_uses(instr);
825 exec_list_push_head(&cursor.block->instr_list, &instr->node);
826 break;
827 case nir_cursor_after_block: {
828 /* Inserting instructions after a jump is illegal. */
829 nir_instr *last = nir_block_last_instr(cursor.block);
830 assert(last == NULL || last->type != nir_instr_type_jump);
831 (void) last;
832
833 instr->block = cursor.block;
834 add_defs_uses(instr);
835 exec_list_push_tail(&cursor.block->instr_list, &instr->node);
836 break;
837 }
838 case nir_cursor_before_instr:
839 assert(instr->type != nir_instr_type_jump);
840 instr->block = cursor.instr->block;
841 add_defs_uses(instr);
842 exec_node_insert_node_before(&cursor.instr->node, &instr->node);
843 break;
844 case nir_cursor_after_instr:
845 /* Inserting instructions after a jump is illegal. */
846 assert(cursor.instr->type != nir_instr_type_jump);
847
848 /* Only allow inserting jumps at the end of the block. */
849 if (instr->type == nir_instr_type_jump)
850 assert(cursor.instr == nir_block_last_instr(cursor.instr->block));
851
852 instr->block = cursor.instr->block;
853 add_defs_uses(instr);
854 exec_node_insert_after(&cursor.instr->node, &instr->node);
855 break;
856 }
857
858 if (instr->type == nir_instr_type_jump)
859 nir_handle_add_jump(instr->block);
860 }
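
/* Usage sketch (illustrative): cursors are usually built with the nir.h
 * helpers, e.g. inserting a new instruction just before an existing one:
 *
 *    nir_instr_insert(nir_before_instr(&old_instr->instr),
 *                     &new_instr->instr);
 */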
861
862 static bool
863 src_is_valid(const nir_src *src)
864 {
865 return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
866 }
867
868 static bool
869 remove_use_cb(nir_src *src, void *state)
870 {
871 (void) state;
872
873 if (src_is_valid(src))
874 list_del(&src->use_link);
875
876 return true;
877 }
878
879 static bool
880 remove_def_cb(nir_dest *dest, void *state)
881 {
882 (void) state;
883
884 if (!dest->is_ssa)
885 list_del(&dest->reg.def_link);
886
887 return true;
888 }
889
890 static void
891 remove_defs_uses(nir_instr *instr)
892 {
893 nir_foreach_dest(instr, remove_def_cb, instr);
894 nir_foreach_src(instr, remove_use_cb, instr);
895 }
896
897 void nir_instr_remove_v(nir_instr *instr)
898 {
899 remove_defs_uses(instr);
900 exec_node_remove(&instr->node);
901
902 if (instr->type == nir_instr_type_jump) {
903 nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
904 nir_handle_remove_jump(instr->block, jump_instr->type);
905 }
906 }
907
908 /*@}*/
909
910 void
911 nir_index_local_regs(nir_function_impl *impl)
912 {
913 unsigned index = 0;
914 foreach_list_typed(nir_register, reg, node, &impl->registers) {
915 reg->index = index++;
916 }
917 impl->reg_alloc = index;
918 }
919
920 static bool
921 visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
922 {
923 return cb(&instr->dest.dest, state);
924 }
925
926 static bool
927 visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
928 {
929 return cb(&instr->dest, state);
930 }
931
932 static bool
933 visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
934 void *state)
935 {
936 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
937 return cb(&instr->dest, state);
938
939 return true;
940 }
941
942 static bool
943 visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
944 void *state)
945 {
946 return cb(&instr->dest, state);
947 }
948
949 static bool
950 visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
951 {
952 return cb(&instr->dest, state);
953 }
954
955 static bool
956 visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
957 nir_foreach_dest_cb cb, void *state)
958 {
959 nir_foreach_parallel_copy_entry(entry, instr) {
960 if (!cb(&entry->dest, state))
961 return false;
962 }
963
964 return true;
965 }
966
967 bool
968 nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
969 {
970 switch (instr->type) {
971 case nir_instr_type_alu:
972 return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
973 case nir_instr_type_deref:
974 return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
975 case nir_instr_type_intrinsic:
976 return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
977 case nir_instr_type_tex:
978 return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
979 case nir_instr_type_phi:
980 return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
981 case nir_instr_type_parallel_copy:
982 return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
983 cb, state);
984
985 case nir_instr_type_load_const:
986 case nir_instr_type_ssa_undef:
987 case nir_instr_type_call:
988 case nir_instr_type_jump:
989 break;
990
991 default:
992 unreachable("Invalid instruction type");
993 break;
994 }
995
996 return true;
997 }
998
999 struct foreach_ssa_def_state {
1000 nir_foreach_ssa_def_cb cb;
1001 void *client_state;
1002 };
1003
1004 static inline bool
1005 nir_ssa_def_visitor(nir_dest *dest, void *void_state)
1006 {
1007 struct foreach_ssa_def_state *state = void_state;
1008
1009 if (dest->is_ssa)
1010 return state->cb(&dest->ssa, state->client_state);
1011 else
1012 return true;
1013 }
1014
1015 bool
1016 nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
1017 {
1018 switch (instr->type) {
1019 case nir_instr_type_alu:
1020 case nir_instr_type_deref:
1021 case nir_instr_type_tex:
1022 case nir_instr_type_intrinsic:
1023 case nir_instr_type_phi:
1024 case nir_instr_type_parallel_copy: {
1025 struct foreach_ssa_def_state foreach_state = {cb, state};
1026 return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
1027 }
1028
1029 case nir_instr_type_load_const:
1030 return cb(&nir_instr_as_load_const(instr)->def, state);
1031 case nir_instr_type_ssa_undef:
1032 return cb(&nir_instr_as_ssa_undef(instr)->def, state);
1033 case nir_instr_type_call:
1034 case nir_instr_type_jump:
1035 return true;
1036 default:
1037 unreachable("Invalid instruction type");
1038 }
1039 }
1040
1041 static bool
1042 visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
1043 {
1044 if (!cb(src, state))
1045 return false;
1046 if (!src->is_ssa && src->reg.indirect)
1047 return cb(src->reg.indirect, state);
1048 return true;
1049 }
1050
1051 static bool
1052 visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
1053 {
1054 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1055 if (!visit_src(&instr->src[i].src, cb, state))
1056 return false;
1057
1058 return true;
1059 }
1060
1061 static bool
1062 visit_deref_instr_src(nir_deref_instr *instr,
1063 nir_foreach_src_cb cb, void *state)
1064 {
1065 if (instr->deref_type != nir_deref_type_var) {
1066 if (!visit_src(&instr->parent, cb, state))
1067 return false;
1068 }
1069
1070 if (instr->deref_type == nir_deref_type_array ||
1071 instr->deref_type == nir_deref_type_ptr_as_array) {
1072 if (!visit_src(&instr->arr.index, cb, state))
1073 return false;
1074 }
1075
1076 return true;
1077 }
1078
1079 static bool
1080 visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
1081 {
1082 for (unsigned i = 0; i < instr->num_srcs; i++) {
1083 if (!visit_src(&instr->src[i].src, cb, state))
1084 return false;
1085 }
1086
1087 return true;
1088 }
1089
1090 static bool
1091 visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
1092 void *state)
1093 {
1094 unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
1095 for (unsigned i = 0; i < num_srcs; i++) {
1096 if (!visit_src(&instr->src[i], cb, state))
1097 return false;
1098 }
1099
1100 return true;
1101 }
1102
1103 static bool
1104 visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
1105 {
1106 for (unsigned i = 0; i < instr->num_params; i++) {
1107 if (!visit_src(&instr->params[i], cb, state))
1108 return false;
1109 }
1110
1111 return true;
1112 }
1113
1114 static bool
1115 visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
1116 {
1117 nir_foreach_phi_src(src, instr) {
1118 if (!visit_src(&src->src, cb, state))
1119 return false;
1120 }
1121
1122 return true;
1123 }
1124
1125 static bool
1126 visit_parallel_copy_src(nir_parallel_copy_instr *instr,
1127 nir_foreach_src_cb cb, void *state)
1128 {
1129 nir_foreach_parallel_copy_entry(entry, instr) {
1130 if (!visit_src(&entry->src, cb, state))
1131 return false;
1132 }
1133
1134 return true;
1135 }
1136
1137 typedef struct {
1138 void *state;
1139 nir_foreach_src_cb cb;
1140 } visit_dest_indirect_state;
1141
1142 static bool
1143 visit_dest_indirect(nir_dest *dest, void *_state)
1144 {
1145 visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;
1146
1147 if (!dest->is_ssa && dest->reg.indirect)
1148 return state->cb(dest->reg.indirect, state->state);
1149
1150 return true;
1151 }
1152
1153 bool
1154 nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
1155 {
1156 switch (instr->type) {
1157 case nir_instr_type_alu:
1158 if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
1159 return false;
1160 break;
1161 case nir_instr_type_deref:
1162 if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
1163 return false;
1164 break;
1165 case nir_instr_type_intrinsic:
1166 if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
1167 return false;
1168 break;
1169 case nir_instr_type_tex:
1170 if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
1171 return false;
1172 break;
1173 case nir_instr_type_call:
1174 if (!visit_call_src(nir_instr_as_call(instr), cb, state))
1175 return false;
1176 break;
1177 case nir_instr_type_load_const:
1178 /* Constant load instructions have no regular sources */
1179 break;
1180 case nir_instr_type_phi:
1181 if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
1182 return false;
1183 break;
1184 case nir_instr_type_parallel_copy:
1185 if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
1186 cb, state))
1187 return false;
1188 break;
1189 case nir_instr_type_jump:
1190 case nir_instr_type_ssa_undef:
1191 return true;
1192
1193 default:
1194 unreachable("Invalid instruction type");
1195 break;
1196 }
1197
1198 visit_dest_indirect_state dest_state;
1199 dest_state.state = state;
1200 dest_state.cb = cb;
1201 return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
1202 }
1203
1204 int64_t
1205 nir_src_comp_as_int(nir_src src, unsigned comp)
1206 {
1207 assert(nir_src_is_const(src));
1208 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1209
1210 assert(comp < load->def.num_components);
1211 switch (load->def.bit_size) {
1212 /* int1_t uses 0/-1 convention */
1213 case 1: return -(int)load->value.b[comp];
1214 case 8: return load->value.i8[comp];
1215 case 16: return load->value.i16[comp];
1216 case 32: return load->value.i32[comp];
1217 case 64: return load->value.i64[comp];
1218 default:
1219 unreachable("Invalid bit size");
1220 }
1221 }
1222
1223 uint64_t
1224 nir_src_comp_as_uint(nir_src src, unsigned comp)
1225 {
1226 assert(nir_src_is_const(src));
1227 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1228
1229 assert(comp < load->def.num_components);
1230 switch (load->def.bit_size) {
1231 case 1: return load->value.b[comp];
1232 case 8: return load->value.u8[comp];
1233 case 16: return load->value.u16[comp];
1234 case 32: return load->value.u32[comp];
1235 case 64: return load->value.u64[comp];
1236 default:
1237 unreachable("Invalid bit size");
1238 }
1239 }
1240
1241 bool
1242 nir_src_comp_as_bool(nir_src src, unsigned comp)
1243 {
1244 int64_t i = nir_src_comp_as_int(src, comp);
1245
1246 /* Booleans of any size use 0/-1 convention */
1247 assert(i == 0 || i == -1);
1248
1249 return i;
1250 }
1251
1252 double
1253 nir_src_comp_as_float(nir_src src, unsigned comp)
1254 {
1255 assert(nir_src_is_const(src));
1256 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1257
1258 assert(comp < load->def.num_components);
1259 switch (load->def.bit_size) {
1260 case 16: return _mesa_half_to_float(load->value.u16[comp]);
1261 case 32: return load->value.f32[comp];
1262 case 64: return load->value.f64[comp];
1263 default:
1264 unreachable("Invalid bit size");
1265 }
1266 }
1267
1268 int64_t
1269 nir_src_as_int(nir_src src)
1270 {
1271 assert(nir_src_num_components(src) == 1);
1272 return nir_src_comp_as_int(src, 0);
1273 }
1274
1275 uint64_t
1276 nir_src_as_uint(nir_src src)
1277 {
1278 assert(nir_src_num_components(src) == 1);
1279 return nir_src_comp_as_uint(src, 0);
1280 }
1281
1282 bool
1283 nir_src_as_bool(nir_src src)
1284 {
1285 assert(nir_src_num_components(src) == 1);
1286 return nir_src_comp_as_bool(src, 0);
1287 }
1288
1289 double
1290 nir_src_as_float(nir_src src)
1291 {
1292 assert(nir_src_num_components(src) == 1);
1293 return nir_src_comp_as_float(src, 0);
1294 }
1295
1296 nir_const_value *
1297 nir_src_as_const_value(nir_src src)
1298 {
1299 if (!src.is_ssa)
1300 return NULL;
1301
1302 if (src.ssa->parent_instr->type != nir_instr_type_load_const)
1303 return NULL;
1304
1305 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1306
1307 return &load->value;
1308 }
1309
1310 /**
1311 * Returns true if the source is known to be dynamically uniform. Otherwise
1312 * it returns false, meaning the source may or may not be dynamically
1313 * uniform but this cannot be determined.
1314 */
1315 bool
1316 nir_src_is_dynamically_uniform(nir_src src)
1317 {
1318 if (!src.is_ssa)
1319 return false;
1320
1321 /* Constants are trivially dynamically uniform */
1322 if (src.ssa->parent_instr->type == nir_instr_type_load_const)
1323 return true;
1324
1325 /* As are uniform variables */
1326 if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
1327 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
1328
1329 if (intr->intrinsic == nir_intrinsic_load_uniform)
1330 return true;
1331 }
1332
1333 /* XXX: this could have many more tests, such as when a sampler function is
1334 * called with dynamically uniform arguments.
1335 */
1336 return false;
1337 }
1338
1339 static void
1340 src_remove_all_uses(nir_src *src)
1341 {
1342 for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
1343 if (!src_is_valid(src))
1344 continue;
1345
1346 list_del(&src->use_link);
1347 }
1348 }
1349
1350 static void
1351 src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
1352 {
1353 for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
1354 if (!src_is_valid(src))
1355 continue;
1356
1357 if (parent_instr) {
1358 src->parent_instr = parent_instr;
1359 if (src->is_ssa)
1360 list_addtail(&src->use_link, &src->ssa->uses);
1361 else
1362 list_addtail(&src->use_link, &src->reg.reg->uses);
1363 } else {
1364 assert(parent_if);
1365 src->parent_if = parent_if;
1366 if (src->is_ssa)
1367 list_addtail(&src->use_link, &src->ssa->if_uses);
1368 else
1369 list_addtail(&src->use_link, &src->reg.reg->if_uses);
1370 }
1371 }
1372 }
1373
1374 void
1375 nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
1376 {
1377 assert(!src_is_valid(src) || src->parent_instr == instr);
1378
1379 src_remove_all_uses(src);
1380 *src = new_src;
1381 src_add_all_uses(src, instr, NULL);
1382 }
1383
1384 void
1385 nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
1386 {
1387 assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);
1388
1389 src_remove_all_uses(dest);
1390 src_remove_all_uses(src);
1391 *dest = *src;
1392 *src = NIR_SRC_INIT;
1393 src_add_all_uses(dest, dest_instr, NULL);
1394 }
1395
1396 void
1397 nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
1398 {
1399 nir_src *src = &if_stmt->condition;
1400 assert(!src_is_valid(src) || src->parent_if == if_stmt);
1401
1402 src_remove_all_uses(src);
1403 *src = new_src;
1404 src_add_all_uses(src, NULL, if_stmt);
1405 }
1406
1407 void
1408 nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
1409 {
1410 if (dest->is_ssa) {
1411 /* We can only overwrite an SSA destination if it has no uses. */
1412 assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
1413 } else {
1414 list_del(&dest->reg.def_link);
1415 if (dest->reg.indirect)
1416 src_remove_all_uses(dest->reg.indirect);
1417 }
1418
1419 /* We can't rewrite with an SSA def */
1420 assert(!new_dest.is_ssa);
1421
1422 nir_dest_copy(dest, &new_dest, instr);
1423
1424 dest->reg.parent_instr = instr;
1425 list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);
1426
1427 if (dest->reg.indirect)
1428 src_add_all_uses(dest->reg.indirect, instr, NULL);
1429 }
1430
1431 /* note: does *not* take ownership of 'name' */
1432 void
1433 nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
1434 unsigned num_components,
1435 unsigned bit_size, const char *name)
1436 {
1437 def->name = ralloc_strdup(instr, name);
1438 def->parent_instr = instr;
1439 list_inithead(&def->uses);
1440 list_inithead(&def->if_uses);
1441 def->num_components = num_components;
1442 def->bit_size = bit_size;
1443
1444 if (instr->block) {
1445 nir_function_impl *impl =
1446 nir_cf_node_get_function(&instr->block->cf_node);
1447
1448 def->index = impl->ssa_alloc++;
1449 } else {
1450 def->index = UINT_MAX;
1451 }
1452 }
1453
1454 /* note: does *not* take ownership of 'name' */
1455 void
1456 nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
1457 unsigned num_components, unsigned bit_size,
1458 const char *name)
1459 {
1460 dest->is_ssa = true;
1461 nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
1462 }
1463
1464 void
1465 nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
1466 {
1467 assert(!new_src.is_ssa || def != new_src.ssa);
1468
1469 nir_foreach_use_safe(use_src, def)
1470 nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
1471
1472 nir_foreach_if_use_safe(use_src, def)
1473 nir_if_rewrite_condition(use_src->parent_if, new_src);
1474 }
1475
1476 static bool
1477 is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
1478 {
1479 assert(start->block == end->block);
1480
1481 if (between->block != start->block)
1482 return false;
1483
1484 /* Search backwards looking for "between" */
1485 while (start != end) {
1486 if (between == end)
1487 return true;
1488
1489 end = nir_instr_prev(end);
1490 assert(end);
1491 }
1492
1493 return false;
1494 }
1495
1496 /* Replaces all uses of the given SSA def with the given source but only if
1497 * the use comes after the after_me instruction. This can be useful if you
1498 * are emitting code to fix up the result of some instruction: you can freely
1499 * use the result in that code, then call rewrite_uses_after with the last
1500 * fixup instruction as after_me, and it will replace all of the uses you
1501 * want without touching the fixup code.
1502 *
1503 * This function assumes that after_me is in the same block as
1504 * def->parent_instr and that after_me comes after def->parent_instr.
1505 */
1506 void
1507 nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
1508 nir_instr *after_me)
1509 {
1510 if (new_src.is_ssa && def == new_src.ssa)
1511 return;
1512
1513 nir_foreach_use_safe(use_src, def) {
1514 assert(use_src->parent_instr != def->parent_instr);
1515 /* Since def already dominates all of its uses, the only way a use can
1516 * not be dominated by after_me is if it is between def and after_me in
1517 * the instruction list.
1518 */
1519 if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
1520 nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
1521 }
1522
1523 nir_foreach_if_use_safe(use_src, def)
1524 nir_if_rewrite_condition(use_src->parent_if, new_src);
1525 }
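
/* A sketch of the fixup pattern described above (b, val and offset are
 * illustrative; b is a nir_builder):
 *
 *    b->cursor = nir_after_instr(val->parent_instr);
 *    nir_ssa_def *fixed = nir_fadd(b, val, offset);   // may use val freely
 *    nir_ssa_def_rewrite_uses_after(val, nir_src_for_ssa(fixed),
 *                                   fixed->parent_instr);
 */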
1526
1527 nir_component_mask_t
1528 nir_ssa_def_components_read(const nir_ssa_def *def)
1529 {
1530 nir_component_mask_t read_mask = 0;
1531 nir_foreach_use(use, def) {
1532 if (use->parent_instr->type == nir_instr_type_alu) {
1533 nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
1534 nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
1535 int src_idx = alu_src - &alu->src[0];
1536 assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
1537 read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
1538 } else {
1539 return (1 << def->num_components) - 1;
1540 }
1541 }
1542
1543 if (!list_empty(&def->if_uses))
1544 read_mask |= 1;
1545
1546 return read_mask;
1547 }
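
/* Example (illustrative): if def is a vec4 whose only use is an ALU source
 * swizzled .xy, the result is 0x3; any non-ALU use conservatively marks
 * every component as read, and an if-condition use adds component 0.
 */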
1548
1549 nir_block *
1550 nir_block_cf_tree_next(nir_block *block)
1551 {
1552 if (block == NULL) {
1553 /* nir_foreach_block_safe() will call this function on a NULL block
1554 * after the last iteration, but it won't use the result so just return
1555 * NULL here.
1556 */
1557 return NULL;
1558 }
1559
1560 nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
1561 if (cf_next)
1562 return nir_cf_node_cf_tree_first(cf_next);
1563
1564 nir_cf_node *parent = block->cf_node.parent;
1565
1566 switch (parent->type) {
1567 case nir_cf_node_if: {
1568 /* Are we at the end of the if? Go to the beginning of the else */
1569 nir_if *if_stmt = nir_cf_node_as_if(parent);
1570 if (block == nir_if_last_then_block(if_stmt))
1571 return nir_if_first_else_block(if_stmt);
1572
1573 assert(block == nir_if_last_else_block(if_stmt));
1574 /* fall through */
1575 }
1576
1577 case nir_cf_node_loop:
1578 return nir_cf_node_as_block(nir_cf_node_next(parent));
1579
1580 case nir_cf_node_function:
1581 return NULL;
1582
1583 default:
1584 unreachable("unknown cf node type");
1585 }
1586 }
1587
1588 nir_block *
1589 nir_block_cf_tree_prev(nir_block *block)
1590 {
1591 if (block == NULL) {
1592 /* do this for consistency with nir_block_cf_tree_next() */
1593 return NULL;
1594 }
1595
1596 nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
1597 if (cf_prev)
1598 return nir_cf_node_cf_tree_last(cf_prev);
1599
1600 nir_cf_node *parent = block->cf_node.parent;
1601
1602 switch (parent->type) {
1603 case nir_cf_node_if: {
1604 /* Are we at the beginning of the else? Go to the end of the if */
1605 nir_if *if_stmt = nir_cf_node_as_if(parent);
1606 if (block == nir_if_first_else_block(if_stmt))
1607 return nir_if_last_then_block(if_stmt);
1608
1609 assert(block == nir_if_first_then_block(if_stmt));
1610 /* fall through */
1611 }
1612
1613 case nir_cf_node_loop:
1614 return nir_cf_node_as_block(nir_cf_node_prev(parent));
1615
1616 case nir_cf_node_function:
1617 return NULL;
1618
1619 default:
1620 unreachable("unknown cf node type");
1621 }
1622 }
1623
1624 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
1625 {
1626 switch (node->type) {
1627 case nir_cf_node_function: {
1628 nir_function_impl *impl = nir_cf_node_as_function(node);
1629 return nir_start_block(impl);
1630 }
1631
1632 case nir_cf_node_if: {
1633 nir_if *if_stmt = nir_cf_node_as_if(node);
1634 return nir_if_first_then_block(if_stmt);
1635 }
1636
1637 case nir_cf_node_loop: {
1638 nir_loop *loop = nir_cf_node_as_loop(node);
1639 return nir_loop_first_block(loop);
1640 }
1641
1642 case nir_cf_node_block: {
1643 return nir_cf_node_as_block(node);
1644 }
1645
1646 default:
1647 unreachable("unknown node type");
1648 }
1649 }
1650
1651 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
1652 {
1653 switch (node->type) {
1654 case nir_cf_node_function: {
1655 nir_function_impl *impl = nir_cf_node_as_function(node);
1656 return nir_impl_last_block(impl);
1657 }
1658
1659 case nir_cf_node_if: {
1660 nir_if *if_stmt = nir_cf_node_as_if(node);
1661 return nir_if_last_else_block(if_stmt);
1662 }
1663
1664 case nir_cf_node_loop: {
1665 nir_loop *loop = nir_cf_node_as_loop(node);
1666 return nir_loop_last_block(loop);
1667 }
1668
1669 case nir_cf_node_block: {
1670 return nir_cf_node_as_block(node);
1671 }
1672
1673 default:
1674 unreachable("unknown node type");
1675 }
1676 }
1677
1678 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
1679 {
1680 if (node->type == nir_cf_node_block)
1681 return nir_block_cf_tree_next(nir_cf_node_as_block(node));
1682 else if (node->type == nir_cf_node_function)
1683 return NULL;
1684 else
1685 return nir_cf_node_as_block(nir_cf_node_next(node));
1686 }
1687
1688 nir_if *
1689 nir_block_get_following_if(nir_block *block)
1690 {
1691 if (exec_node_is_tail_sentinel(&block->cf_node.node))
1692 return NULL;
1693
1694 if (nir_cf_node_is_last(&block->cf_node))
1695 return NULL;
1696
1697 nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);
1698
1699 if (next_node->type != nir_cf_node_if)
1700 return NULL;
1701
1702 return nir_cf_node_as_if(next_node);
1703 }
1704
1705 nir_loop *
1706 nir_block_get_following_loop(nir_block *block)
1707 {
1708 if (exec_node_is_tail_sentinel(&block->cf_node.node))
1709 return NULL;
1710
1711 if (nir_cf_node_is_last(&block->cf_node))
1712 return NULL;
1713
1714 nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);
1715
1716 if (next_node->type != nir_cf_node_loop)
1717 return NULL;
1718
1719 return nir_cf_node_as_loop(next_node);
1720 }
1721
1722 void
1723 nir_index_blocks(nir_function_impl *impl)
1724 {
1725 unsigned index = 0;
1726
1727 if (impl->valid_metadata & nir_metadata_block_index)
1728 return;
1729
1730 nir_foreach_block(block, impl) {
1731 block->index = index++;
1732 }
1733
1734 /* The end_block isn't really part of the program, which is why its index
1735 * is >= num_blocks.
1736 */
1737 impl->num_blocks = impl->end_block->index = index;
1738 }
1739
1740 static bool
1741 index_ssa_def_cb(nir_ssa_def *def, void *state)
1742 {
1743 unsigned *index = (unsigned *) state;
1744 def->index = (*index)++;
1745
1746 return true;
1747 }
1748
1749 /**
1750 * The indices are applied top-to-bottom which has the very nice property
1751 * that, if A dominates B, then A->index <= B->index.
1752 */
1753 void
1754 nir_index_ssa_defs(nir_function_impl *impl)
1755 {
1756 unsigned index = 0;
1757
1758 nir_foreach_block(block, impl) {
1759 nir_foreach_instr(instr, block)
1760 nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
1761 }
1762
1763 impl->ssa_alloc = index;
1764 }
1765
1766 /**
1767 * The indices are applied top-to-bottom which has the very nice property
1768 * that, if A dominates B, then A->index <= B->index.
1769 */
1770 unsigned
1771 nir_index_instrs(nir_function_impl *impl)
1772 {
1773 unsigned index = 0;
1774
1775 nir_foreach_block(block, impl) {
1776 nir_foreach_instr(instr, block)
1777 instr->index = index++;
1778 }
1779
1780 return index;
1781 }
1782
1783 nir_intrinsic_op
1784 nir_intrinsic_from_system_value(gl_system_value val)
1785 {
1786 switch (val) {
1787 case SYSTEM_VALUE_VERTEX_ID:
1788 return nir_intrinsic_load_vertex_id;
1789 case SYSTEM_VALUE_INSTANCE_ID:
1790 return nir_intrinsic_load_instance_id;
1791 case SYSTEM_VALUE_DRAW_ID:
1792 return nir_intrinsic_load_draw_id;
1793 case SYSTEM_VALUE_BASE_INSTANCE:
1794 return nir_intrinsic_load_base_instance;
1795 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
1796 return nir_intrinsic_load_vertex_id_zero_base;
1797 case SYSTEM_VALUE_IS_INDEXED_DRAW:
1798 return nir_intrinsic_load_is_indexed_draw;
1799 case SYSTEM_VALUE_FIRST_VERTEX:
1800 return nir_intrinsic_load_first_vertex;
1801 case SYSTEM_VALUE_BASE_VERTEX:
1802 return nir_intrinsic_load_base_vertex;
1803 case SYSTEM_VALUE_INVOCATION_ID:
1804 return nir_intrinsic_load_invocation_id;
1805 case SYSTEM_VALUE_FRAG_COORD:
1806 return nir_intrinsic_load_frag_coord;
1807 case SYSTEM_VALUE_FRONT_FACE:
1808 return nir_intrinsic_load_front_face;
1809 case SYSTEM_VALUE_SAMPLE_ID:
1810 return nir_intrinsic_load_sample_id;
1811 case SYSTEM_VALUE_SAMPLE_POS:
1812 return nir_intrinsic_load_sample_pos;
1813 case SYSTEM_VALUE_SAMPLE_MASK_IN:
1814 return nir_intrinsic_load_sample_mask_in;
1815 case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
1816 return nir_intrinsic_load_local_invocation_id;
1817 case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
1818 return nir_intrinsic_load_local_invocation_index;
1819 case SYSTEM_VALUE_WORK_GROUP_ID:
1820 return nir_intrinsic_load_work_group_id;
1821 case SYSTEM_VALUE_NUM_WORK_GROUPS:
1822 return nir_intrinsic_load_num_work_groups;
1823 case SYSTEM_VALUE_PRIMITIVE_ID:
1824 return nir_intrinsic_load_primitive_id;
1825 case SYSTEM_VALUE_TESS_COORD:
1826 return nir_intrinsic_load_tess_coord;
1827 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
1828 return nir_intrinsic_load_tess_level_outer;
1829 case SYSTEM_VALUE_TESS_LEVEL_INNER:
1830 return nir_intrinsic_load_tess_level_inner;
1831 case SYSTEM_VALUE_VERTICES_IN:
1832 return nir_intrinsic_load_patch_vertices_in;
1833 case SYSTEM_VALUE_HELPER_INVOCATION:
1834 return nir_intrinsic_load_helper_invocation;
1835 case SYSTEM_VALUE_VIEW_INDEX:
1836 return nir_intrinsic_load_view_index;
1837 case SYSTEM_VALUE_SUBGROUP_SIZE:
1838 return nir_intrinsic_load_subgroup_size;
1839 case SYSTEM_VALUE_SUBGROUP_INVOCATION:
1840 return nir_intrinsic_load_subgroup_invocation;
1841 case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
1842 return nir_intrinsic_load_subgroup_eq_mask;
1843 case SYSTEM_VALUE_SUBGROUP_GE_MASK:
1844 return nir_intrinsic_load_subgroup_ge_mask;
1845 case SYSTEM_VALUE_SUBGROUP_GT_MASK:
1846 return nir_intrinsic_load_subgroup_gt_mask;
1847 case SYSTEM_VALUE_SUBGROUP_LE_MASK:
1848 return nir_intrinsic_load_subgroup_le_mask;
1849 case SYSTEM_VALUE_SUBGROUP_LT_MASK:
1850 return nir_intrinsic_load_subgroup_lt_mask;
1851 case SYSTEM_VALUE_NUM_SUBGROUPS:
1852 return nir_intrinsic_load_num_subgroups;
1853 case SYSTEM_VALUE_SUBGROUP_ID:
1854 return nir_intrinsic_load_subgroup_id;
1855 case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
1856 return nir_intrinsic_load_local_group_size;
1857 case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
1858 return nir_intrinsic_load_global_invocation_id;
1859 case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX:
1860 return nir_intrinsic_load_global_invocation_index;
1861 case SYSTEM_VALUE_WORK_DIM:
1862 return nir_intrinsic_load_work_dim;
1863 default:
1864 unreachable("system value does not directly correspond to intrinsic");
1865 }
1866 }
1867
1868 gl_system_value
1869 nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
1870 {
1871 switch (intrin) {
1872 case nir_intrinsic_load_vertex_id:
1873 return SYSTEM_VALUE_VERTEX_ID;
1874 case nir_intrinsic_load_instance_id:
1875 return SYSTEM_VALUE_INSTANCE_ID;
1876 case nir_intrinsic_load_draw_id:
1877 return SYSTEM_VALUE_DRAW_ID;
1878 case nir_intrinsic_load_base_instance:
1879 return SYSTEM_VALUE_BASE_INSTANCE;
1880 case nir_intrinsic_load_vertex_id_zero_base:
1881 return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
1882 case nir_intrinsic_load_first_vertex:
1883 return SYSTEM_VALUE_FIRST_VERTEX;
1884 case nir_intrinsic_load_is_indexed_draw:
1885 return SYSTEM_VALUE_IS_INDEXED_DRAW;
1886 case nir_intrinsic_load_base_vertex:
1887 return SYSTEM_VALUE_BASE_VERTEX;
1888 case nir_intrinsic_load_invocation_id:
1889 return SYSTEM_VALUE_INVOCATION_ID;
1890 case nir_intrinsic_load_frag_coord:
1891 return SYSTEM_VALUE_FRAG_COORD;
1892 case nir_intrinsic_load_front_face:
1893 return SYSTEM_VALUE_FRONT_FACE;
1894 case nir_intrinsic_load_sample_id:
1895 return SYSTEM_VALUE_SAMPLE_ID;
1896 case nir_intrinsic_load_sample_pos:
1897 return SYSTEM_VALUE_SAMPLE_POS;
1898 case nir_intrinsic_load_sample_mask_in:
1899 return SYSTEM_VALUE_SAMPLE_MASK_IN;
1900 case nir_intrinsic_load_local_invocation_id:
1901 return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
1902 case nir_intrinsic_load_local_invocation_index:
1903 return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
1904 case nir_intrinsic_load_num_work_groups:
1905 return SYSTEM_VALUE_NUM_WORK_GROUPS;
1906 case nir_intrinsic_load_work_group_id:
1907 return SYSTEM_VALUE_WORK_GROUP_ID;
1908 case nir_intrinsic_load_primitive_id:
1909 return SYSTEM_VALUE_PRIMITIVE_ID;
1910 case nir_intrinsic_load_tess_coord:
1911 return SYSTEM_VALUE_TESS_COORD;
1912 case nir_intrinsic_load_tess_level_outer:
1913 return SYSTEM_VALUE_TESS_LEVEL_OUTER;
1914 case nir_intrinsic_load_tess_level_inner:
1915 return SYSTEM_VALUE_TESS_LEVEL_INNER;
1916 case nir_intrinsic_load_patch_vertices_in:
1917 return SYSTEM_VALUE_VERTICES_IN;
1918 case nir_intrinsic_load_helper_invocation:
1919 return SYSTEM_VALUE_HELPER_INVOCATION;
1920 case nir_intrinsic_load_view_index:
1921 return SYSTEM_VALUE_VIEW_INDEX;
1922 case nir_intrinsic_load_subgroup_size:
1923 return SYSTEM_VALUE_SUBGROUP_SIZE;
1924 case nir_intrinsic_load_subgroup_invocation:
1925 return SYSTEM_VALUE_SUBGROUP_INVOCATION;
1926 case nir_intrinsic_load_subgroup_eq_mask:
1927 return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
1928 case nir_intrinsic_load_subgroup_ge_mask:
1929 return SYSTEM_VALUE_SUBGROUP_GE_MASK;
1930 case nir_intrinsic_load_subgroup_gt_mask:
1931 return SYSTEM_VALUE_SUBGROUP_GT_MASK;
1932 case nir_intrinsic_load_subgroup_le_mask:
1933 return SYSTEM_VALUE_SUBGROUP_LE_MASK;
1934 case nir_intrinsic_load_subgroup_lt_mask:
1935 return SYSTEM_VALUE_SUBGROUP_LT_MASK;
1936 case nir_intrinsic_load_num_subgroups:
1937 return SYSTEM_VALUE_NUM_SUBGROUPS;
1938 case nir_intrinsic_load_subgroup_id:
1939 return SYSTEM_VALUE_SUBGROUP_ID;
1940 case nir_intrinsic_load_local_group_size:
1941 return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
1942 case nir_intrinsic_load_global_invocation_id:
1943 return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
1944 default:
1945 unreachable("intrinsic doesn't produce a system value");
1946 }
1947 }
1948
1949 /* OpenGL utility method that remaps the attribute locations if they are
1950 * doubles. Not needed for Vulkan due to the differences in input location
1951 * counts for doubles between Vulkan and OpenGL.
1952 *
1953 * The bitfield returned in dual_slot is one bit for each double input slot in
1954 * the original OpenGL single-slot input numbering. The mapping from old
1955 * locations to new locations is as follows:
1956 *
1957 * new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
1958 */
1959 void
1960 nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
1961 {
1962 assert(shader->info.stage == MESA_SHADER_VERTEX);
1963
1964 *dual_slot = 0;
1965 nir_foreach_variable(var, &shader->inputs) {
1966 if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
1967 unsigned slots = glsl_count_attribute_slots(var->type, true);
1968 *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
1969 }
1970 }
1971
1972 nir_foreach_variable(var, &shader->inputs) {
1973 var->data.location +=
1974 util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
1975 }
1976 }
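
/* Worked example (illustrative): a dvec4 at location 0 and a vec4 at
 * location 1 give dual_slot = 0x1. The dvec4 keeps location 0 (now covering
 * slots 0 and 1) and the vec4 moves to
 * 1 + util_bitcount64(0x1 & BITFIELD64_MASK(1)) = 2.
 */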
1977
1978 /* Returns an attribute mask that has been re-compacted using the given
1979 * dual_slot mask.
1980 */
1981 uint64_t
1982 nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
1983 {
1984 while (dual_slot) {
1985 unsigned loc = u_bit_scan64(&dual_slot);
1986 /* mask of all bits up to and including loc */
1987 uint64_t mask = BITFIELD64_MASK(loc + 1);
1988 attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
1989 }
1990 return attribs;
1991 }
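
/* Worked example (illustrative): continuing the remap example above, an
 * attribute mask 0b101 in dual-slot numbering (bits 0 and 2) with
 * dual_slot = 0x1 compacts to 0b11:
 *
 *    loc = 0, mask = BITFIELD64_MASK(1) = 0x1
 *    attribs = (0b101 & 0x1) | ((0b101 & ~0x1) >> 1) = 0b001 | 0b010 = 0b11
 */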