nir: Add a helper for fetching the SSA def from an instruction
[mesa.git] / src / compiler / nir / nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_function_temp:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_shader_temp:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_mem_shared:
      assert(gl_shader_stage_is_compute(shader->info.stage));
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_mem_global:
      assert(!"nir_shader_add_variable cannot be used for global memory");
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_function_temp;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;
   func->is_entrypoint = false;

   return func;
}

/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
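
/* A minimal illustration of the note above (names hypothetical): while a
 * freshly created intrinsic has not yet been inserted into the IR, copying
 * a source into it is safe:
 *
 *    nir_src_copy(&new_intrin->src[0], &old_intrin->src[0], new_intrin);
 *
 * Once the instruction is in the IR, the use lists must be kept up to
 * date, so go through nir_instr_rewrite_src() instead:
 *
 *    nir_instr_rewrite_src(&new_intrin->instr, &new_intrin->src[0], src);
 */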

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}


static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_pointer_set_create(block);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  That way it
    * wouldn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, the state-tracker creates an
    * initial IR, clones that, runs the appropriate lowering pass, and
    * passes it to the driver, which does common lowering/opt and then
    * stores a ref which is later used to do state-specific lowering and
    * further opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_pointer_set_create(block);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   if_stmt->control = nir_selection_control_none;

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr =
      rzalloc_size(shader, sizeof(*instr) + num_components * sizeof(*instr->value));
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

static int8_t default_tg4_offsets[4][2] =
{
   { 0, 1 },
   { 1, 1 },
   { 1, 0 },
   { 0, 0 },
};

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;
   memcpy(instr->tg4_offsets, default_tg4_offsets, sizeof(instr->tg4_offsets));

   return instr;
}

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

bool
nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex)
{
   if (tex->op != nir_texop_tg4)
      return false;
   return memcmp(tex->tg4_offsets, default_tg4_offsets,
                 sizeof(tex->tg4_offsets)) != 0;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));
   switch (bit_size) {
   case 16: v.u16 = _mesa_float_to_half(d); break;
   case 32: v.f32 = d; break;
   case 64: v.f64 = d; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));
   switch (bit_size) {
   case 1:  v.b   = i & 1; break;
   case 8:  v.i8  = i; break;
   case 16: v.i16 = i; break;
   case 32: v.i32 = i; break;
   case 64: v.i64 = i; break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}
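
/* Illustrative use (not part of this file): a pass lowering a reduction or
 * scan can seed its accumulator with the identity value, e.g.
 * nir_alu_binop_identity(nir_op_iadd, 32) yields 0 and
 * nir_alu_binop_identity(nir_op_iand, 32) yields ~0u, so combining the
 * identity with the first element is a no-op.
 */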

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by converting it to an "after" form and raising it to
 * block granularity whenever possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
755 unreachable("Inavlid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
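
/* For example, if `last` is the final instruction of `block`, the cursors
 * nir_after_instr(last) and nir_after_block(block) (helpers from nir.h)
 * reduce to the same canonical form, so nir_cursors_equal() returns true
 * for them.
 */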

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
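
/* Usage sketch (illustrative; the cursor helpers such as nir_before_instr()
 * live in nir.h, and `shader` / `instr` are hypothetical): materialize a
 * 32-bit constant right before an instruction that is already in the IR:
 *
 *    nir_load_const_instr *load =
 *       nir_load_const_instr_create(shader, 1, 32);
 *    load->value[0].u32 = 42;
 *    nir_instr_insert(nir_before_instr(instr), &load->instr);
 */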

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

/*@}*/

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

nir_ssa_def *
nir_instr_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;

   case nir_instr_type_deref:
      assert(nir_instr_as_deref(instr)->dest.is_ssa);
      return &nir_instr_as_deref(instr)->dest.ssa;

   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         assert(intrin->dest.is_ssa);
         return &intrin->dest.ssa;
      } else {
         return NULL;
      }
   }

   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;

   case nir_instr_type_parallel_copy:
      unreachable("Parallel copies are unsupported by this function");

   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;

   case nir_instr_type_ssa_undef:
      return &nir_instr_as_ssa_undef(instr)->def;

   case nir_instr_type_call:
   case nir_instr_type_jump:
      return NULL;
   }

   unreachable("Invalid instruction type");
}
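
/* Usage sketch (illustrative): assuming the shader is in SSA form, print
 * the index of every SSA def produced in a block:
 *
 *    nir_foreach_instr(instr, block) {
 *       nir_ssa_def *def = nir_instr_ssa_def(instr);
 *       if (def)
 *          printf("ssa_%u\n", def->index);
 *    }
 */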

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

nir_const_value
nir_const_value_for_float(double f, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   switch (bit_size) {
   case 16:
      v.u16 = _mesa_float_to_half(f);
      break;
   case 32:
      v.f32 = f;
      break;
   case 64:
      v.f64 = f;
      break;
   default:
      unreachable("Invalid bit size");
   }

   return v;
}

double
nir_const_value_as_float(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   case 16: return _mesa_half_to_float(value.u16);
   case 32: return value.f32;
   case 64: return value.f64;
   default:
      unreachable("Invalid bit size");
   }
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return load->value;
}

/**
 * Returns true if the source is known to be dynamically uniform. Otherwise
 * it returns false, which means the source may or may not be dynamically
 * uniform but it can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* Operating together dynamically uniform expressions produces a
    * dynamically uniform result
    */
   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
      nir_alu_instr *alu = nir_instr_as_alu(src.ssa->parent_instr);
      for (int i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!nir_src_is_dynamically_uniform(alu->src[i].src))
            return false;
      }

      return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source, but only if
 * the use comes after the after_me instruction. This can be useful if you
 * are emitting code to fix up the result of some instruction: you can
 * freely use the result in that fixup code, then call rewrite_uses_after
 * with the last fixup instruction as after_me, and it will replace all of
 * the uses you want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   if (new_src.is_ssa && def == new_src.ssa)
      return;

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
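
/* Usage sketch (illustrative, names hypothetical): `fixup` is an ALU
 * instruction that reads `def` and was inserted right after
 * def->parent_instr. Redirect every later use of `def` to the fixed-up
 * value without disturbing the fixup code itself:
 *
 *    nir_ssa_def_rewrite_uses_after(def,
 *                                   nir_src_for_ssa(&fixup->dest.dest.ssa),
 *                                   &fixup->instr);
 */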

nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_COLOR0:
      return nir_intrinsic_load_color0;
   case SYSTEM_VALUE_COLOR1:
      return nir_intrinsic_load_color1;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX:
      return nir_intrinsic_load_global_invocation_index;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_color0:
      return SYSTEM_VALUE_COLOR0;
   case nir_intrinsic_load_color1:
      return SYSTEM_VALUE_COLOR1;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}

/* OpenGL utility method that remaps the location attributes if they are
 * doubles. Not needed for Vulkan due to the differences in input location
 * counts for doubles between Vulkan and OpenGL.
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering. The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
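
/* Worked example of the mapping above (purely illustrative): if dual_slot
 * ends up with only bit 1 set, an input at original location 2 moves to
 * 2 + util_bitcount64(dual_slot & BITFIELD64_MASK(2)) = 2 + 1 = 3, while
 * inputs at locations 0 and 1 keep their locations.
 */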

/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}
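
/* For example (illustrative): with attribs = 0b1101 and dual_slot = 0b0010,
 * the single loop iteration computes loc = 1, mask = 0b11, and folds the
 * second slot of the double away:
 *
 *    attribs = (0b1101 & 0b11) | ((0b1101 & ~0b11) >> 1) = 0b0111
 */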

void
nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
                            bool bindless)
{
   switch (intrin->intrinsic) {
#define CASE(op) \
   case nir_intrinsic_image_deref_##op: \
      intrin->intrinsic = bindless ? nir_intrinsic_bindless_image_##op \
                                   : nir_intrinsic_image_##op; \
      break;
   CASE(load)
   CASE(store)
   CASE(atomic_add)
   CASE(atomic_min)
   CASE(atomic_max)
   CASE(atomic_and)
   CASE(atomic_or)
   CASE(atomic_xor)
   CASE(atomic_exchange)
   CASE(atomic_comp_swap)
   CASE(atomic_fadd)
   CASE(size)
   CASE(samples)
   CASE(load_raw_intel)
   CASE(store_raw_intel)
#undef CASE
   default:
2034 unreachable("Unhanded image intrinsic");
   }

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
   nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
   nir_intrinsic_set_format(intrin, var->data.image.format);

   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(src));
}