compiler: Add SYSTEM_VALUE_IS_INDEXED_DRAW and intrinsics
[mesa.git] / src / compiler / nir / nir.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include "nir_control_flow_private.h"
30 #include "util/half_float.h"
31 #include <limits.h>
32 #include <assert.h>
33 #include <math.h>
34
35 nir_shader *
36 nir_shader_create(void *mem_ctx,
37 gl_shader_stage stage,
38 const nir_shader_compiler_options *options,
39 shader_info *si)
40 {
41 nir_shader *shader = rzalloc(mem_ctx, nir_shader);
42
43 exec_list_make_empty(&shader->uniforms);
44 exec_list_make_empty(&shader->inputs);
45 exec_list_make_empty(&shader->outputs);
46 exec_list_make_empty(&shader->shared);
47
48 shader->options = options;
49
50 if (si) {
51 assert(si->stage == stage);
52 shader->info = *si;
53 } else {
54 shader->info.stage = stage;
55 }
56
57 exec_list_make_empty(&shader->functions);
58 exec_list_make_empty(&shader->registers);
59 exec_list_make_empty(&shader->globals);
60 exec_list_make_empty(&shader->system_values);
61 shader->reg_alloc = 0;
62
63 shader->num_inputs = 0;
64 shader->num_outputs = 0;
65 shader->num_uniforms = 0;
66 shader->num_shared = 0;
67
68 return shader;
69 }
70
71 static nir_register *
72 reg_create(void *mem_ctx, struct exec_list *list)
73 {
74 nir_register *reg = ralloc(mem_ctx, nir_register);
75
76 list_inithead(&reg->uses);
77 list_inithead(&reg->defs);
78 list_inithead(&reg->if_uses);
79
80 reg->num_components = 0;
81 reg->bit_size = 32;
82 reg->num_array_elems = 0;
83 reg->is_packed = false;
84 reg->name = NULL;
85
86 exec_list_push_tail(list, &reg->node);
87
88 return reg;
89 }
90
91 nir_register *
92 nir_global_reg_create(nir_shader *shader)
93 {
94 nir_register *reg = reg_create(shader, &shader->registers);
95 reg->index = shader->reg_alloc++;
96 reg->is_global = true;
97
98 return reg;
99 }
100
101 nir_register *
102 nir_local_reg_create(nir_function_impl *impl)
103 {
104 nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
105 reg->index = impl->reg_alloc++;
106 reg->is_global = false;
107
108 return reg;
109 }
110
111 void
112 nir_reg_remove(nir_register *reg)
113 {
114 exec_node_remove(&reg->node);
115 }
116
117 void
118 nir_shader_add_variable(nir_shader *shader, nir_variable *var)
119 {
120 switch (var->data.mode) {
121 case nir_var_all:
122 assert(!"invalid mode");
123 break;
124
125 case nir_var_local:
126 assert(!"nir_shader_add_variable cannot be used for local variables");
127 break;
128
129 case nir_var_param:
130 assert(!"nir_shader_add_variable cannot be used for function parameters");
131 break;
132
133 case nir_var_global:
134 exec_list_push_tail(&shader->globals, &var->node);
135 break;
136
137 case nir_var_shader_in:
138 exec_list_push_tail(&shader->inputs, &var->node);
139 break;
140
141 case nir_var_shader_out:
142 exec_list_push_tail(&shader->outputs, &var->node);
143 break;
144
145 case nir_var_uniform:
146 case nir_var_shader_storage:
147 exec_list_push_tail(&shader->uniforms, &var->node);
148 break;
149
150 case nir_var_shared:
151 assert(shader->info.stage == MESA_SHADER_COMPUTE);
152 exec_list_push_tail(&shader->shared, &var->node);
153 break;
154
155 case nir_var_system_value:
156 exec_list_push_tail(&shader->system_values, &var->node);
157 break;
158 }
159 }
160
161 nir_variable *
162 nir_variable_create(nir_shader *shader, nir_variable_mode mode,
163 const struct glsl_type *type, const char *name)
164 {
165 nir_variable *var = rzalloc(shader, nir_variable);
166 var->name = ralloc_strdup(var, name);
167 var->type = type;
168 var->data.mode = mode;
169
170 if ((mode == nir_var_shader_in &&
171 shader->info.stage != MESA_SHADER_VERTEX) ||
172 (mode == nir_var_shader_out &&
173 shader->info.stage != MESA_SHADER_FRAGMENT))
174 var->data.interpolation = INTERP_MODE_SMOOTH;
175
176 if (mode == nir_var_shader_in || mode == nir_var_uniform)
177 var->data.read_only = true;
178
179 nir_shader_add_variable(shader, var);
180
181 return var;
182 }
183
184 nir_variable *
185 nir_local_variable_create(nir_function_impl *impl,
186 const struct glsl_type *type, const char *name)
187 {
188 nir_variable *var = rzalloc(impl->function->shader, nir_variable);
189 var->name = ralloc_strdup(var, name);
190 var->type = type;
191 var->data.mode = nir_var_local;
192
193 nir_function_impl_add_variable(impl, var);
194
195 return var;
196 }
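/* Usage sketch: how the two variable constructors above are typically
 * combined. The names, types, and the helper itself are hypothetical;
 * nothing in this file calls it.
 */
static void
variable_create_example(nir_shader *shader, nir_function_impl *impl)
{
   /* read-only vec4 shader input, appended to shader->inputs */
   nir_variable *color =
      nir_variable_create(shader, nir_var_shader_in,
                          glsl_vec4_type(), "color_in");

   /* float temporary, appended to impl->locals */
   nir_variable *tmp =
      nir_local_variable_create(impl, glsl_float_type(), "tmp");

   (void) color;
   (void) tmp;
}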
197
198 nir_function *
199 nir_function_create(nir_shader *shader, const char *name)
200 {
201 nir_function *func = ralloc(shader, nir_function);
202
203 exec_list_push_tail(&shader->functions, &func->node);
204
205 func->name = ralloc_strdup(func, name);
206 func->shader = shader;
207 func->num_params = 0;
208 func->params = NULL;
209 func->return_type = glsl_void_type();
210 func->impl = NULL;
211
212 return func;
213 }
214
215 /* NOTE: if the instruction you are copying a src to is already added
216 * to the IR, use nir_instr_rewrite_src() instead.
217 */
218 void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
219 {
220 dest->is_ssa = src->is_ssa;
221 if (src->is_ssa) {
222 dest->ssa = src->ssa;
223 } else {
224 dest->reg.base_offset = src->reg.base_offset;
225 dest->reg.reg = src->reg.reg;
226 if (src->reg.indirect) {
227 dest->reg.indirect = ralloc(mem_ctx, nir_src);
228 nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
229 } else {
230 dest->reg.indirect = NULL;
231 }
232 }
233 }
234
235 void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
236 {
237 /* Copying an SSA definition makes no sense whatsoever. */
238 assert(!src->is_ssa);
239
240 dest->is_ssa = false;
241
242 dest->reg.base_offset = src->reg.base_offset;
243 dest->reg.reg = src->reg.reg;
244 if (src->reg.indirect) {
245 dest->reg.indirect = ralloc(instr, nir_src);
246 nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
247 } else {
248 dest->reg.indirect = NULL;
249 }
250 }
251
252 void
253 nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
254 nir_alu_instr *instr)
255 {
256 nir_src_copy(&dest->src, &src->src, &instr->instr);
257 dest->abs = src->abs;
258 dest->negate = src->negate;
259 for (unsigned i = 0; i < 4; i++)
260 dest->swizzle[i] = src->swizzle[i];
261 }
262
263 void
264 nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
265 nir_alu_instr *instr)
266 {
267 nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
268 dest->write_mask = src->write_mask;
269 dest->saturate = src->saturate;
270 }
271
272
273 static void
274 cf_init(nir_cf_node *node, nir_cf_node_type type)
275 {
276 exec_node_init(&node->node);
277 node->parent = NULL;
278 node->type = type;
279 }
280
281 nir_function_impl *
282 nir_function_impl_create_bare(nir_shader *shader)
283 {
284 nir_function_impl *impl = ralloc(shader, nir_function_impl);
285
286 impl->function = NULL;
287
288 cf_init(&impl->cf_node, nir_cf_node_function);
289
290 exec_list_make_empty(&impl->body);
291 exec_list_make_empty(&impl->registers);
292 exec_list_make_empty(&impl->locals);
293 impl->num_params = 0;
294 impl->params = NULL;
295 impl->return_var = NULL;
296 impl->reg_alloc = 0;
297 impl->ssa_alloc = 0;
298 impl->valid_metadata = nir_metadata_none;
299
300 /* create start & end blocks */
301 nir_block *start_block = nir_block_create(shader);
302 nir_block *end_block = nir_block_create(shader);
303 start_block->cf_node.parent = &impl->cf_node;
304 end_block->cf_node.parent = &impl->cf_node;
305 impl->end_block = end_block;
306
307 exec_list_push_tail(&impl->body, &start_block->cf_node.node);
308
309 start_block->successors[0] = end_block;
310 _mesa_set_add(end_block->predecessors, start_block);
311 return impl;
312 }
313
314 nir_function_impl *
315 nir_function_impl_create(nir_function *function)
316 {
317 assert(function->impl == NULL);
318
319 nir_function_impl *impl = nir_function_impl_create_bare(function->shader);
320
321 function->impl = impl;
322 impl->function = function;
323
324 impl->num_params = function->num_params;
325 impl->params = ralloc_array(function->shader,
326 nir_variable *, impl->num_params);
327
328 for (unsigned i = 0; i < impl->num_params; i++) {
329 impl->params[i] = rzalloc(function->shader, nir_variable);
330 impl->params[i]->type = function->params[i].type;
331 impl->params[i]->data.mode = nir_var_param;
332 impl->params[i]->data.location = i;
333 }
334
335 if (!glsl_type_is_void(function->return_type)) {
336 impl->return_var = rzalloc(function->shader, nir_variable);
337 impl->return_var->type = function->return_type;
338 impl->return_var->data.mode = nir_var_param;
339 impl->return_var->data.location = -1;
340 } else {
341 impl->return_var = NULL;
342 }
343
344 return impl;
345 }
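/* Usage sketch: the usual sequence for building an empty-but-valid shader
 * from scratch with the constructors above. The stage and the "main" name
 * are arbitrary illustrative choices.
 */
static nir_function_impl *
shader_skeleton_example(void *mem_ctx,
                        const nir_shader_compiler_options *options)
{
   nir_shader *shader =
      nir_shader_create(mem_ctx, MESA_SHADER_FRAGMENT, options, NULL);
   nir_function *main_func = nir_function_create(shader, "main");

   /* also creates the start and end blocks */
   return nir_function_impl_create(main_func);
}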
346
347 nir_block *
348 nir_block_create(nir_shader *shader)
349 {
350 nir_block *block = rzalloc(shader, nir_block);
351
352 cf_init(&block->cf_node, nir_cf_node_block);
353
354 block->successors[0] = block->successors[1] = NULL;
355 block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
356 _mesa_key_pointer_equal);
357 block->imm_dom = NULL;
358 /* XXX maybe it would be worth it to defer allocation? This
359 * way it doesn't get allocated for shader refs that never run
360 * nir_calc_dominance? For example, state-tracker creates an
361 * initial IR, clones that, runs appropriate lowering pass, passes
362 * to driver which does common lowering/opt, and then stores ref
363 * which is later used to do state-specific lowering and further
364 * opt. Do any of the references not need dominance metadata?
365 */
366 block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
367 _mesa_key_pointer_equal);
368
369 exec_list_make_empty(&block->instr_list);
370
371 return block;
372 }
373
374 static inline void
375 src_init(nir_src *src)
376 {
377 src->is_ssa = false;
378 src->reg.reg = NULL;
379 src->reg.indirect = NULL;
380 src->reg.base_offset = 0;
381 }
382
383 nir_if *
384 nir_if_create(nir_shader *shader)
385 {
386 nir_if *if_stmt = ralloc(shader, nir_if);
387
388 cf_init(&if_stmt->cf_node, nir_cf_node_if);
389 src_init(&if_stmt->condition);
390
391 nir_block *then = nir_block_create(shader);
392 exec_list_make_empty(&if_stmt->then_list);
393 exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
394 then->cf_node.parent = &if_stmt->cf_node;
395
396 nir_block *else_stmt = nir_block_create(shader);
397 exec_list_make_empty(&if_stmt->else_list);
398 exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
399 else_stmt->cf_node.parent = &if_stmt->cf_node;
400
401 return if_stmt;
402 }
403
404 nir_loop *
405 nir_loop_create(nir_shader *shader)
406 {
407 nir_loop *loop = rzalloc(shader, nir_loop);
408
409 cf_init(&loop->cf_node, nir_cf_node_loop);
410
411 nir_block *body = nir_block_create(shader);
412 exec_list_make_empty(&loop->body);
413 exec_list_push_tail(&loop->body, &body->cf_node.node);
414 body->cf_node.parent = &loop->cf_node;
415
416 body->successors[0] = body;
417 _mesa_set_add(body->predecessors, body);
418
419 return loop;
420 }
421
422 static void
423 instr_init(nir_instr *instr, nir_instr_type type)
424 {
425 instr->type = type;
426 instr->block = NULL;
427 exec_node_init(&instr->node);
428 }
429
430 static void
431 dest_init(nir_dest *dest)
432 {
433 dest->is_ssa = false;
434 dest->reg.reg = NULL;
435 dest->reg.indirect = NULL;
436 dest->reg.base_offset = 0;
437 }
438
439 static void
440 alu_dest_init(nir_alu_dest *dest)
441 {
442 dest_init(&dest->dest);
443 dest->saturate = false;
444 dest->write_mask = 0xf;
445 }
446
447 static void
448 alu_src_init(nir_alu_src *src)
449 {
450 src_init(&src->src);
451 src->abs = src->negate = false;
452 src->swizzle[0] = 0;
453 src->swizzle[1] = 1;
454 src->swizzle[2] = 2;
455 src->swizzle[3] = 3;
456 }
457
458 nir_alu_instr *
459 nir_alu_instr_create(nir_shader *shader, nir_op op)
460 {
461 unsigned num_srcs = nir_op_infos[op].num_inputs;
462 /* TODO: don't use rzalloc */
463 nir_alu_instr *instr =
464 rzalloc_size(shader,
465 sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
466
467 instr_init(&instr->instr, nir_instr_type_alu);
468 instr->op = op;
469 alu_dest_init(&instr->dest);
470 for (unsigned i = 0; i < num_srcs; i++)
471 alu_src_init(&instr->src[i]);
472
473 return instr;
474 }
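/* Usage sketch: building a float add of two existing SSA defs and placing
 * it right after `after`. Assumes `a` and `b` have matching component
 * counts and bit sizes and dominate the insertion point; the helper name
 * is hypothetical.
 */
static nir_ssa_def *
build_fadd_example(nir_shader *shader, nir_instr *after,
                   nir_ssa_def *a, nir_ssa_def *b)
{
   nir_alu_instr *add = nir_alu_instr_create(shader, nir_op_fadd);
   add->src[0].src = nir_src_for_ssa(a);
   add->src[1].src = nir_src_for_ssa(b);
   nir_ssa_dest_init(&add->instr, &add->dest.dest,
                     a->num_components, a->bit_size, "sum");
   /* the default write mask is 0xf; trim it to the actual width */
   add->dest.write_mask = (1 << a->num_components) - 1;
   nir_instr_insert(nir_after_instr(after), &add->instr);
   return &add->dest.dest.ssa;
}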
475
476 nir_jump_instr *
477 nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
478 {
479 nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
480 instr_init(&instr->instr, nir_instr_type_jump);
481 instr->type = type;
482 return instr;
483 }
484
485 nir_load_const_instr *
486 nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
487 unsigned bit_size)
488 {
489 nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
490 instr_init(&instr->instr, nir_instr_type_load_const);
491
492 nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);
493
494 return instr;
495 }
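/* Usage sketch: materializing a scalar 32-bit constant at the top of an
 * impl. The value 1.0 is arbitrary; nir_before_cf_list() is the cursor
 * helper from nir.h.
 */
static nir_ssa_def *
load_one_example(nir_shader *shader, nir_function_impl *impl)
{
   nir_load_const_instr *one = nir_load_const_instr_create(shader, 1, 32);
   one->value.f32[0] = 1.0f;
   nir_instr_insert(nir_before_cf_list(&impl->body), &one->instr);
   return &one->def;
}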
496
497 nir_intrinsic_instr *
498 nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
499 {
500 unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
501 /* TODO: don't use rzalloc */
502 nir_intrinsic_instr *instr =
503 rzalloc_size(shader,
504 sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));
505
506 instr_init(&instr->instr, nir_instr_type_intrinsic);
507 instr->intrinsic = op;
508
509 if (nir_intrinsic_infos[op].has_dest)
510 dest_init(&instr->dest);
511
512 for (unsigned i = 0; i < num_srcs; i++)
513 src_init(&instr->src[i]);
514
515 return instr;
516 }
517
518 nir_call_instr *
519 nir_call_instr_create(nir_shader *shader, nir_function *callee)
520 {
521 nir_call_instr *instr = ralloc(shader, nir_call_instr);
522 instr_init(&instr->instr, nir_instr_type_call);
523
524 instr->callee = callee;
525 instr->num_params = callee->num_params;
526 instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
527 instr->return_deref = NULL;
528
529 return instr;
530 }
531
532 nir_tex_instr *
533 nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
534 {
535 nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
536 instr_init(&instr->instr, nir_instr_type_tex);
537
538 dest_init(&instr->dest);
539
540 instr->num_srcs = num_srcs;
541 instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
542 for (unsigned i = 0; i < num_srcs; i++)
543 src_init(&instr->src[i].src);
544
545 instr->texture_index = 0;
546 instr->texture_array_size = 0;
547 instr->texture = NULL;
548 instr->sampler_index = 0;
549 instr->sampler = NULL;
550
551 return instr;
552 }
553
554 void
555 nir_tex_instr_add_src(nir_tex_instr *tex,
556 nir_tex_src_type src_type,
557 nir_src src)
558 {
559 nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
560 tex->num_srcs + 1);
561
562 for (unsigned i = 0; i < tex->num_srcs; i++) {
563 new_srcs[i].src_type = tex->src[i].src_type;
564 nir_instr_move_src(&tex->instr, &new_srcs[i].src,
565 &tex->src[i].src);
566 }
567
568 ralloc_free(tex->src);
569 tex->src = new_srcs;
570
571 tex->src[tex->num_srcs].src_type = src_type;
572 nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
573 tex->num_srcs++;
574 }
575
576 void
577 nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
578 {
579 assert(src_idx < tex->num_srcs);
580
581 /* First rewrite the source to NIR_SRC_INIT */
582 nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);
583
584 /* Now, move all of the other sources down */
585 for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
586 tex->src[i-1].src_type = tex->src[i].src_type;
587 nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
588 }
589 tex->num_srcs--;
590 }
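/* Usage sketch: the two source-editing helpers above, shown together only
 * for illustration. `lod` is assumed to be a scalar SSA def that dominates
 * the texture instruction; nir_tex_instr_src_index() is the nir.h lookup
 * helper.
 */
static void
tex_lod_example(nir_tex_instr *tex, nir_ssa_def *lod)
{
   /* give the instruction an explicit LOD */
   nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(lod));

   /* ...and drop it again, if present */
   int idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
   if (idx >= 0)
      nir_tex_instr_remove_src(tex, idx);
}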
591
592 nir_phi_instr *
593 nir_phi_instr_create(nir_shader *shader)
594 {
595 nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
596 instr_init(&instr->instr, nir_instr_type_phi);
597
598 dest_init(&instr->dest);
599 exec_list_make_empty(&instr->srcs);
600 return instr;
601 }
602
603 nir_parallel_copy_instr *
604 nir_parallel_copy_instr_create(nir_shader *shader)
605 {
606 nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
607 instr_init(&instr->instr, nir_instr_type_parallel_copy);
608
609 exec_list_make_empty(&instr->entries);
610
611 return instr;
612 }
613
614 nir_ssa_undef_instr *
615 nir_ssa_undef_instr_create(nir_shader *shader,
616 unsigned num_components,
617 unsigned bit_size)
618 {
619 nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
620 instr_init(&instr->instr, nir_instr_type_ssa_undef);
621
622 nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);
623
624 return instr;
625 }
626
627 nir_deref_var *
628 nir_deref_var_create(void *mem_ctx, nir_variable *var)
629 {
630 nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
631 deref->deref.deref_type = nir_deref_type_var;
632 deref->deref.child = NULL;
633 deref->deref.type = var->type;
634 deref->var = var;
635 return deref;
636 }
637
638 nir_deref_array *
639 nir_deref_array_create(void *mem_ctx)
640 {
641 nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
642 deref->deref.deref_type = nir_deref_type_array;
643 deref->deref.child = NULL;
644 deref->deref_array_type = nir_deref_array_type_direct;
645 src_init(&deref->indirect);
646 deref->base_offset = 0;
647 return deref;
648 }
649
650 nir_deref_struct *
651 nir_deref_struct_create(void *mem_ctx, unsigned field_index)
652 {
653 nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
654 deref->deref.deref_type = nir_deref_type_struct;
655 deref->deref.child = NULL;
656 deref->index = field_index;
657 return deref;
658 }
659
660 nir_deref_var *
661 nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
662 {
663 if (deref == NULL)
664 return NULL;
665
666 nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
667 ret->deref.type = deref->deref.type;
668 if (deref->deref.child)
669 ret->deref.child = nir_deref_clone(deref->deref.child, ret);
670 return ret;
671 }
672
673 static nir_deref_array *
674 deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
675 {
676 nir_deref_array *ret = nir_deref_array_create(mem_ctx);
677 ret->base_offset = deref->base_offset;
678 ret->deref_array_type = deref->deref_array_type;
679 if (deref->deref_array_type == nir_deref_array_type_indirect) {
680 nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
681 }
682 ret->deref.type = deref->deref.type;
683 if (deref->deref.child)
684 ret->deref.child = nir_deref_clone(deref->deref.child, ret);
685 return ret;
686 }
687
688 static nir_deref_struct *
689 deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
690 {
691 nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
692 ret->deref.type = deref->deref.type;
693 if (deref->deref.child)
694 ret->deref.child = nir_deref_clone(deref->deref.child, ret);
695 return ret;
696 }
697
698 nir_deref *
699 nir_deref_clone(const nir_deref *deref, void *mem_ctx)
700 {
701 if (deref == NULL)
702 return NULL;
703
704 switch (deref->deref_type) {
705 case nir_deref_type_var:
706 return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
707 case nir_deref_type_array:
708 return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
709 case nir_deref_type_struct:
710 return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
711 default:
712 unreachable("Invalid dereference type");
713 }
714
715 return NULL;
716 }
717
718 /* This is the second step in the recursion. We've found the tail and made a
719 * copy. Now we need to iterate over all possible leaves and call the
720 * callback on each one.
721 */
722 static bool
723 deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
724 nir_deref_foreach_leaf_cb cb, void *state)
725 {
726 unsigned length;
727 union {
728 nir_deref_array arr;
729 nir_deref_struct str;
730 } tmp;
731
732 assert(tail->child == NULL);
733 switch (glsl_get_base_type(tail->type)) {
734 case GLSL_TYPE_UINT:
735 case GLSL_TYPE_UINT16:
736 case GLSL_TYPE_UINT64:
737 case GLSL_TYPE_INT:
738 case GLSL_TYPE_INT16:
739 case GLSL_TYPE_INT64:
740 case GLSL_TYPE_FLOAT:
741 case GLSL_TYPE_FLOAT16:
742 case GLSL_TYPE_DOUBLE:
743 case GLSL_TYPE_BOOL:
744 if (glsl_type_is_vector_or_scalar(tail->type))
745 return cb(deref, state);
746 /* Fall Through */
747
748 case GLSL_TYPE_ARRAY:
749 tmp.arr.deref.deref_type = nir_deref_type_array;
750 tmp.arr.deref.type = glsl_get_array_element(tail->type);
751 tmp.arr.deref_array_type = nir_deref_array_type_direct;
752 tmp.arr.indirect = NIR_SRC_INIT;
753 tail->child = &tmp.arr.deref;
754
755 length = glsl_get_length(tail->type);
756 for (unsigned i = 0; i < length; i++) {
757 tmp.arr.deref.child = NULL;
758 tmp.arr.base_offset = i;
759 if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
760 return false;
761 }
762 return true;
763
764 case GLSL_TYPE_STRUCT:
765 tmp.str.deref.deref_type = nir_deref_type_struct;
766 tail->child = &tmp.str.deref;
767
768 length = glsl_get_length(tail->type);
769 for (unsigned i = 0; i < length; i++) {
770 tmp.arr.deref.child = NULL;
771 tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
772 tmp.str.index = i;
773 if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
774 return false;
775 }
776 return true;
777
778 default:
779 unreachable("Invalid type for dereference");
780 }
781 }
782
783 /* This is the first step of the foreach_leaf recursion. In this step we are
784 * walking to the end of the deref chain and making a copy in the stack as we
785 * go. This is because we don't want to mutate the deref chain that was
786 * passed in by the caller. The downside is that this deref chain is on the
787 * stack and, if the caller wants to do anything with it, they will have to
788 * make their own copy because this one will go away.
789 */
790 static bool
791 deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
792 nir_deref_foreach_leaf_cb cb, void *state)
793 {
794 union {
795 nir_deref_array arr;
796 nir_deref_struct str;
797 } c;
798
799 if (tail->child) {
800 switch (tail->child->deref_type) {
801 case nir_deref_type_array:
802 c.arr = *nir_deref_as_array(tail->child);
803 tail->child = &c.arr.deref;
804 return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);
805
806 case nir_deref_type_struct:
807 c.str = *nir_deref_as_struct(tail->child);
808 tail->child = &c.str.deref;
809 return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);
810
811 case nir_deref_type_var:
812 default:
813 unreachable("Invalid deref type for a child");
814 }
815 } else {
816 /* We've gotten to the end of the original deref. Time to start
817 * building our own derefs.
818 */
819 return deref_foreach_leaf_build_recur(deref, tail, cb, state);
820 }
821 }
822
823 /**
824 * This function iterates over all of the possible derefs that can be created
825 * with the given deref as the head. It then calls the provided callback with
826 * a full deref for each one.
827 *
828 * The deref passed to the callback will be allocated on the stack. You will
829 * need to make a copy if you want it to hang around.
830 */
831 bool
832 nir_deref_foreach_leaf(nir_deref_var *deref,
833 nir_deref_foreach_leaf_cb cb, void *state)
834 {
835 nir_deref_var copy = *deref;
836 return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
837 }
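/* Usage sketch: a leaf callback that simply counts the vector/scalar
 * leaves reachable from a deref chain. Returning true continues the
 * iteration; returning false aborts it.
 */
static bool
count_leaf_cb(nir_deref_var *deref, void *state)
{
   (void) deref;
   (*(unsigned *) state)++;
   return true;
}

static unsigned
count_deref_leaves_example(nir_deref_var *deref)
{
   unsigned count = 0;
   nir_deref_foreach_leaf(deref, count_leaf_cb, &count);
   return count;
}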
838
839 /* Returns a load_const instruction that represents the constant
840 * initializer for the given deref chain. The caller is responsible for
841 * ensuring that there actually is a constant initializer.
842 */
843 nir_load_const_instr *
844 nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
845 {
846 nir_constant *constant = deref->var->constant_initializer;
847 assert(constant);
848
849 const nir_deref *tail = &deref->deref;
850 unsigned matrix_col = 0;
851 while (tail->child) {
852 switch (tail->child->deref_type) {
853 case nir_deref_type_array: {
854 nir_deref_array *arr = nir_deref_as_array(tail->child);
855 assert(arr->deref_array_type == nir_deref_array_type_direct);
856 if (glsl_type_is_matrix(tail->type)) {
857 assert(arr->deref.child == NULL);
858 matrix_col = arr->base_offset;
859 } else {
860 constant = constant->elements[arr->base_offset];
861 }
862 break;
863 }
864
865 case nir_deref_type_struct: {
866 constant = constant->elements[nir_deref_as_struct(tail->child)->index];
867 break;
868 }
869
870 default:
871 unreachable("Invalid deref child type");
872 }
873
874 tail = tail->child;
875 }
876
877 unsigned bit_size = glsl_get_bit_size(tail->type);
878 nir_load_const_instr *load =
879 nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
880 bit_size);
881
882 switch (glsl_get_base_type(tail->type)) {
883 case GLSL_TYPE_FLOAT:
884 case GLSL_TYPE_INT:
885 case GLSL_TYPE_UINT:
886 case GLSL_TYPE_FLOAT16:
887 case GLSL_TYPE_DOUBLE:
888 case GLSL_TYPE_INT16:
889 case GLSL_TYPE_UINT16:
890 case GLSL_TYPE_UINT64:
891 case GLSL_TYPE_INT64:
892 case GLSL_TYPE_BOOL:
893 load->value = constant->values[matrix_col];
894 break;
895 default:
896 unreachable("Invalid immediate type");
897 }
898
899 return load;
900 }
901
902 static nir_const_value
903 const_value_float(double d, unsigned bit_size)
904 {
905 nir_const_value v;
906 switch (bit_size) {
907 case 16: v.u16[0] = _mesa_float_to_half(d); break;
908 case 32: v.f32[0] = d; break;
909 case 64: v.f64[0] = d; break;
910 default:
911 unreachable("Invalid bit size");
912 }
913 return v;
914 }
915
916 static nir_const_value
917 const_value_int(int64_t i, unsigned bit_size)
918 {
919 nir_const_value v;
920 switch (bit_size) {
921 case 8: v.i8[0] = i; break;
922 case 16: v.i16[0] = i; break;
923 case 32: v.i32[0] = i; break;
924 case 64: v.i64[0] = i; break;
925 default:
926 unreachable("Invalid bit size");
927 }
928 return v;
929 }
930
931 nir_const_value
932 nir_alu_binop_identity(nir_op binop, unsigned bit_size)
933 {
934 const int64_t max_int = (1ull << (bit_size - 1)) - 1;
935 const int64_t min_int = -max_int - 1;
936 switch (binop) {
937 case nir_op_iadd:
938 return const_value_int(0, bit_size);
939 case nir_op_fadd:
940 return const_value_float(0, bit_size);
941 case nir_op_imul:
942 return const_value_int(1, bit_size);
943 case nir_op_fmul:
944 return const_value_float(1, bit_size);
945 case nir_op_imin:
946 return const_value_int(max_int, bit_size);
947 case nir_op_umin:
948 return const_value_int(~0ull, bit_size);
949 case nir_op_fmin:
950 return const_value_float(INFINITY, bit_size);
951 case nir_op_imax:
952 return const_value_int(min_int, bit_size);
953 case nir_op_umax:
954 return const_value_int(0, bit_size);
955 case nir_op_fmax:
956 return const_value_float(-INFINITY, bit_size);
957 case nir_op_iand:
958 return const_value_int(~0ull, bit_size);
959 case nir_op_ior:
960 return const_value_int(0, bit_size);
961 case nir_op_ixor:
962 return const_value_int(0, bit_size);
963 default:
964 unreachable("Invalid reduction operation");
965 }
966 }
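/* Usage sketch: seeding a reduction accumulator with the identity, so the
 * first real element passes through the binop unchanged (e.g. fmax starts
 * at -INFINITY, iand at all-ones). The caller still has to insert the
 * resulting instruction; the helper is hypothetical.
 */
static nir_ssa_def *
reduction_seed_example(nir_shader *shader, nir_op binop, unsigned bit_size)
{
   nir_load_const_instr *seed =
      nir_load_const_instr_create(shader, 1, bit_size);
   seed->value = nir_alu_binop_identity(binop, bit_size);
   return &seed->def;
}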
967
968 nir_function_impl *
969 nir_cf_node_get_function(nir_cf_node *node)
970 {
971 while (node->type != nir_cf_node_function) {
972 node = node->parent;
973 }
974
975 return nir_cf_node_as_function(node);
976 }
977
978 /* Reduces a cursor by converting it to an equivalent "after" form and
979 * raising it to block granularity, where possible.
980 */
981 static nir_cursor
982 reduce_cursor(nir_cursor cursor)
983 {
984 switch (cursor.option) {
985 case nir_cursor_before_block:
986 assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
987 nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
988 if (exec_list_is_empty(&cursor.block->instr_list)) {
989 /* Empty block. After is as good as before. */
990 cursor.option = nir_cursor_after_block;
991 }
992 return cursor;
993
994 case nir_cursor_after_block:
995 return cursor;
996
997 case nir_cursor_before_instr: {
998 nir_instr *prev_instr = nir_instr_prev(cursor.instr);
999 if (prev_instr) {
1000 /* Before this instruction is after the previous */
1001 cursor.instr = prev_instr;
1002 cursor.option = nir_cursor_after_instr;
1003 } else {
1004 /* No previous instruction. Switch to before block */
1005 cursor.block = cursor.instr->block;
1006 cursor.option = nir_cursor_before_block;
1007 }
1008 return reduce_cursor(cursor);
1009 }
1010
1011 case nir_cursor_after_instr:
1012 if (nir_instr_next(cursor.instr) == NULL) {
1013 /* This is the last instruction, switch to after block */
1014 cursor.option = nir_cursor_after_block;
1015 cursor.block = cursor.instr->block;
1016 }
1017 return cursor;
1018
1019 default:
1020 unreachable("Invalid cursor option");
1021 }
1022 }
1023
1024 bool
1025 nir_cursors_equal(nir_cursor a, nir_cursor b)
1026 {
1027 /* Reduced cursors should be unique */
1028 a = reduce_cursor(a);
1029 b = reduce_cursor(b);
1030
1031 return a.block == b.block && a.option == b.option;
1032 }
1033
1034 static bool
1035 add_use_cb(nir_src *src, void *state)
1036 {
1037 nir_instr *instr = state;
1038
1039 src->parent_instr = instr;
1040 list_addtail(&src->use_link,
1041 src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);
1042
1043 return true;
1044 }
1045
1046 static bool
1047 add_ssa_def_cb(nir_ssa_def *def, void *state)
1048 {
1049 nir_instr *instr = state;
1050
1051 if (instr->block && def->index == UINT_MAX) {
1052 nir_function_impl *impl =
1053 nir_cf_node_get_function(&instr->block->cf_node);
1054
1055 def->index = impl->ssa_alloc++;
1056 }
1057
1058 return true;
1059 }
1060
1061 static bool
1062 add_reg_def_cb(nir_dest *dest, void *state)
1063 {
1064 nir_instr *instr = state;
1065
1066 if (!dest->is_ssa) {
1067 dest->reg.parent_instr = instr;
1068 list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
1069 }
1070
1071 return true;
1072 }
1073
1074 static void
1075 add_defs_uses(nir_instr *instr)
1076 {
1077 nir_foreach_src(instr, add_use_cb, instr);
1078 nir_foreach_dest(instr, add_reg_def_cb, instr);
1079 nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
1080 }
1081
1082 void
1083 nir_instr_insert(nir_cursor cursor, nir_instr *instr)
1084 {
1085 switch (cursor.option) {
1086 case nir_cursor_before_block:
1087 /* Only allow inserting jumps into empty blocks. */
1088 if (instr->type == nir_instr_type_jump)
1089 assert(exec_list_is_empty(&cursor.block->instr_list));
1090
1091 instr->block = cursor.block;
1092 add_defs_uses(instr);
1093 exec_list_push_head(&cursor.block->instr_list, &instr->node);
1094 break;
1095 case nir_cursor_after_block: {
1096 /* Inserting instructions after a jump is illegal. */
1097 nir_instr *last = nir_block_last_instr(cursor.block);
1098 assert(last == NULL || last->type != nir_instr_type_jump);
1099 (void) last;
1100
1101 instr->block = cursor.block;
1102 add_defs_uses(instr);
1103 exec_list_push_tail(&cursor.block->instr_list, &instr->node);
1104 break;
1105 }
1106 case nir_cursor_before_instr:
1107 assert(instr->type != nir_instr_type_jump);
1108 instr->block = cursor.instr->block;
1109 add_defs_uses(instr);
1110 exec_node_insert_node_before(&cursor.instr->node, &instr->node);
1111 break;
1112 case nir_cursor_after_instr:
1113 /* Inserting instructions after a jump is illegal. */
1114 assert(cursor.instr->type != nir_instr_type_jump);
1115
1116 /* Only allow inserting jumps at the end of the block. */
1117 if (instr->type == nir_instr_type_jump)
1118 assert(cursor.instr == nir_block_last_instr(cursor.instr->block));
1119
1120 instr->block = cursor.instr->block;
1121 add_defs_uses(instr);
1122 exec_node_insert_after(&cursor.instr->node, &instr->node);
1123 break;
1124 }
1125
1126 if (instr->type == nir_instr_type_jump)
1127 nir_handle_add_jump(instr->block);
1128 }
1129
1130 static bool
1131 src_is_valid(const nir_src *src)
1132 {
1133 return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
1134 }
1135
1136 static bool
1137 remove_use_cb(nir_src *src, void *state)
1138 {
1139 (void) state;
1140
1141 if (src_is_valid(src))
1142 list_del(&src->use_link);
1143
1144 return true;
1145 }
1146
1147 static bool
1148 remove_def_cb(nir_dest *dest, void *state)
1149 {
1150 (void) state;
1151
1152 if (!dest->is_ssa)
1153 list_del(&dest->reg.def_link);
1154
1155 return true;
1156 }
1157
1158 static void
1159 remove_defs_uses(nir_instr *instr)
1160 {
1161 nir_foreach_dest(instr, remove_def_cb, instr);
1162 nir_foreach_src(instr, remove_use_cb, instr);
1163 }
1164
1165 void nir_instr_remove_v(nir_instr *instr)
1166 {
1167 remove_defs_uses(instr);
1168 exec_node_remove(&instr->node);
1169
1170 if (instr->type == nir_instr_type_jump) {
1171 nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
1172 nir_handle_remove_jump(instr->block, jump_instr->type);
1173 }
1174 }
1175
1176 /*@}*/
1177
1178 void
1179 nir_index_local_regs(nir_function_impl *impl)
1180 {
1181 unsigned index = 0;
1182 foreach_list_typed(nir_register, reg, node, &impl->registers) {
1183 reg->index = index++;
1184 }
1185 impl->reg_alloc = index;
1186 }
1187
1188 void
1189 nir_index_global_regs(nir_shader *shader)
1190 {
1191 unsigned index = 0;
1192 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1193 reg->index = index++;
1194 }
1195 shader->reg_alloc = index;
1196 }
1197
1198 static bool
1199 visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
1200 {
1201 return cb(&instr->dest.dest, state);
1202 }
1203
1204 static bool
1205 visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
1206 void *state)
1207 {
1208 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1209 return cb(&instr->dest, state);
1210
1211 return true;
1212 }
1213
1214 static bool
1215 visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
1216 void *state)
1217 {
1218 return cb(&instr->dest, state);
1219 }
1220
1221 static bool
1222 visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
1223 {
1224 return cb(&instr->dest, state);
1225 }
1226
1227 static bool
1228 visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
1229 nir_foreach_dest_cb cb, void *state)
1230 {
1231 nir_foreach_parallel_copy_entry(entry, instr) {
1232 if (!cb(&entry->dest, state))
1233 return false;
1234 }
1235
1236 return true;
1237 }
1238
1239 bool
1240 nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
1241 {
1242 switch (instr->type) {
1243 case nir_instr_type_alu:
1244 return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
1245 case nir_instr_type_intrinsic:
1246 return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
1247 case nir_instr_type_tex:
1248 return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
1249 case nir_instr_type_phi:
1250 return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
1251 case nir_instr_type_parallel_copy:
1252 return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
1253 cb, state);
1254
1255 case nir_instr_type_load_const:
1256 case nir_instr_type_ssa_undef:
1257 case nir_instr_type_call:
1258 case nir_instr_type_jump:
1259 break;
1260
1261 default:
1262 unreachable("Invalid instruction type");
1263 break;
1264 }
1265
1266 return true;
1267 }
1268
1269 struct foreach_ssa_def_state {
1270 nir_foreach_ssa_def_cb cb;
1271 void *client_state;
1272 };
1273
1274 static inline bool
1275 nir_ssa_def_visitor(nir_dest *dest, void *void_state)
1276 {
1277 struct foreach_ssa_def_state *state = void_state;
1278
1279 if (dest->is_ssa)
1280 return state->cb(&dest->ssa, state->client_state);
1281 else
1282 return true;
1283 }
1284
1285 bool
1286 nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
1287 {
1288 switch (instr->type) {
1289 case nir_instr_type_alu:
1290 case nir_instr_type_tex:
1291 case nir_instr_type_intrinsic:
1292 case nir_instr_type_phi:
1293 case nir_instr_type_parallel_copy: {
1294 struct foreach_ssa_def_state foreach_state = {cb, state};
1295 return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
1296 }
1297
1298 case nir_instr_type_load_const:
1299 return cb(&nir_instr_as_load_const(instr)->def, state);
1300 case nir_instr_type_ssa_undef:
1301 return cb(&nir_instr_as_ssa_undef(instr)->def, state);
1302 case nir_instr_type_call:
1303 case nir_instr_type_jump:
1304 return true;
1305 default:
1306 unreachable("Invalid instruction type");
1307 }
1308 }
1309
1310 static bool
1311 visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
1312 {
1313 if (!cb(src, state))
1314 return false;
1315 if (!src->is_ssa && src->reg.indirect)
1316 return cb(src->reg.indirect, state);
1317 return true;
1318 }
1319
1320 static bool
1321 visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
1322 void *state)
1323 {
1324 if (deref->deref_array_type == nir_deref_array_type_indirect)
1325 return visit_src(&deref->indirect, cb, state);
1326 return true;
1327 }
1328
1329 static bool
1330 visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
1331 {
1332 nir_deref *cur = &deref->deref;
1333 while (cur != NULL) {
1334 if (cur->deref_type == nir_deref_type_array) {
1335 if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
1336 return false;
1337 }
1338
1339 cur = cur->child;
1340 }
1341
1342 return true;
1343 }
1344
1345 static bool
1346 visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
1347 {
1348 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1349 if (!visit_src(&instr->src[i].src, cb, state))
1350 return false;
1351
1352 return true;
1353 }
1354
1355 static bool
1356 visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
1357 {
1358 for (unsigned i = 0; i < instr->num_srcs; i++) {
1359 if (!visit_src(&instr->src[i].src, cb, state))
1360 return false;
1361 }
1362
1363 if (instr->texture != NULL) {
1364 if (!visit_deref_src(instr->texture, cb, state))
1365 return false;
1366 }
1367
1368 if (instr->sampler != NULL) {
1369 if (!visit_deref_src(instr->sampler, cb, state))
1370 return false;
1371 }
1372
1373 return true;
1374 }
1375
1376 static bool
1377 visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
1378 void *state)
1379 {
1380 unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
1381 for (unsigned i = 0; i < num_srcs; i++) {
1382 if (!visit_src(&instr->src[i], cb, state))
1383 return false;
1384 }
1385
1386 unsigned num_vars =
1387 nir_intrinsic_infos[instr->intrinsic].num_variables;
1388 for (unsigned i = 0; i < num_vars; i++) {
1389 if (!visit_deref_src(instr->variables[i], cb, state))
1390 return false;
1391 }
1392
1393 return true;
1394 }
1395
1396 static bool
1397 visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
1398 {
1399 nir_foreach_phi_src(src, instr) {
1400 if (!visit_src(&src->src, cb, state))
1401 return false;
1402 }
1403
1404 return true;
1405 }
1406
1407 static bool
1408 visit_parallel_copy_src(nir_parallel_copy_instr *instr,
1409 nir_foreach_src_cb cb, void *state)
1410 {
1411 nir_foreach_parallel_copy_entry(entry, instr) {
1412 if (!visit_src(&entry->src, cb, state))
1413 return false;
1414 }
1415
1416 return true;
1417 }
1418
1419 typedef struct {
1420 void *state;
1421 nir_foreach_src_cb cb;
1422 } visit_dest_indirect_state;
1423
1424 static bool
1425 visit_dest_indirect(nir_dest *dest, void *_state)
1426 {
1427 visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;
1428
1429 if (!dest->is_ssa && dest->reg.indirect)
1430 return state->cb(dest->reg.indirect, state->state);
1431
1432 return true;
1433 }
1434
1435 bool
1436 nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
1437 {
1438 switch (instr->type) {
1439 case nir_instr_type_alu:
1440 if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
1441 return false;
1442 break;
1443 case nir_instr_type_intrinsic:
1444 if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
1445 return false;
1446 break;
1447 case nir_instr_type_tex:
1448 if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
1449 return false;
1450 break;
1451 case nir_instr_type_call:
1452 /* Call instructions have no regular sources */
1453 break;
1454 case nir_instr_type_load_const:
1455 /* Constant load instructions have no regular sources */
1456 break;
1457 case nir_instr_type_phi:
1458 if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
1459 return false;
1460 break;
1461 case nir_instr_type_parallel_copy:
1462 if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
1463 cb, state))
1464 return false;
1465 break;
1466 case nir_instr_type_jump:
1467 case nir_instr_type_ssa_undef:
1468 return true;
1469
1470 default:
1471 unreachable("Invalid instruction type");
1472 break;
1473 }
1474
1475 visit_dest_indirect_state dest_state;
1476 dest_state.state = state;
1477 dest_state.cb = cb;
1478 return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
1479 }
1480
1481 nir_const_value *
1482 nir_src_as_const_value(nir_src src)
1483 {
1484 if (!src.is_ssa)
1485 return NULL;
1486
1487 if (src.ssa->parent_instr->type != nir_instr_type_load_const)
1488 return NULL;
1489
1490 nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
1491
1492 return &load->value;
1493 }
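/* Usage sketch: the usual constant-folding test, here checking whether a
 * source is an immediate zero. Reading u32[0] assumes a 32-bit scalar
 * source; the helper is hypothetical.
 */
static bool
src_is_const_zero_example(nir_src src)
{
   const nir_const_value *cv = nir_src_as_const_value(src);
   return cv != NULL && cv->u32[0] == 0;
}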
1494
1495 /**
1496 * Returns true if the source is known to be dynamically uniform. Otherwise
1497 * it returns false, meaning the source may or may not be dynamically
1498 * uniform but this cannot be determined.
1499 */
1500 bool
1501 nir_src_is_dynamically_uniform(nir_src src)
1502 {
1503 if (!src.is_ssa)
1504 return false;
1505
1506 /* Constants are trivially dynamically uniform */
1507 if (src.ssa->parent_instr->type == nir_instr_type_load_const)
1508 return true;
1509
1510 /* As are uniform variables */
1511 if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
1512 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
1513
1514 if (intr->intrinsic == nir_intrinsic_load_uniform)
1515 return true;
1516 }
1517
1518 /* XXX: this could have many more tests, such as when a sampler function is
1519 * called with dynamically uniform arguments.
1520 */
1521 return false;
1522 }
1523
1524 static void
1525 src_remove_all_uses(nir_src *src)
1526 {
1527 for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
1528 if (!src_is_valid(src))
1529 continue;
1530
1531 list_del(&src->use_link);
1532 }
1533 }
1534
1535 static void
1536 src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
1537 {
1538 for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
1539 if (!src_is_valid(src))
1540 continue;
1541
1542 if (parent_instr) {
1543 src->parent_instr = parent_instr;
1544 if (src->is_ssa)
1545 list_addtail(&src->use_link, &src->ssa->uses);
1546 else
1547 list_addtail(&src->use_link, &src->reg.reg->uses);
1548 } else {
1549 assert(parent_if);
1550 src->parent_if = parent_if;
1551 if (src->is_ssa)
1552 list_addtail(&src->use_link, &src->ssa->if_uses);
1553 else
1554 list_addtail(&src->use_link, &src->reg.reg->if_uses);
1555 }
1556 }
1557 }
1558
1559 void
1560 nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
1561 {
1562 assert(!src_is_valid(src) || src->parent_instr == instr);
1563
1564 src_remove_all_uses(src);
1565 *src = new_src;
1566 src_add_all_uses(src, instr, NULL);
1567 }
1568
1569 void
1570 nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
1571 {
1572 assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);
1573
1574 src_remove_all_uses(dest);
1575 src_remove_all_uses(src);
1576 *dest = *src;
1577 *src = NIR_SRC_INIT;
1578 src_add_all_uses(dest, dest_instr, NULL);
1579 }
1580
1581 void
1582 nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
1583 {
1584 nir_src *src = &if_stmt->condition;
1585 assert(!src_is_valid(src) || src->parent_if == if_stmt);
1586
1587 src_remove_all_uses(src);
1588 *src = new_src;
1589 src_add_all_uses(src, NULL, if_stmt);
1590 }
1591
1592 void
1593 nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
1594 {
1595 if (dest->is_ssa) {
1596 /* We can only overwrite an SSA destination if it has no uses. */
1597 assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
1598 } else {
1599 list_del(&dest->reg.def_link);
1600 if (dest->reg.indirect)
1601 src_remove_all_uses(dest->reg.indirect);
1602 }
1603
1604 /* We can't re-write with an SSA def */
1605 assert(!new_dest.is_ssa);
1606
1607 nir_dest_copy(dest, &new_dest, instr);
1608
1609 dest->reg.parent_instr = instr;
1610 list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);
1611
1612 if (dest->reg.indirect)
1613 src_add_all_uses(dest->reg.indirect, instr, NULL);
1614 }
1615
1616 void
1617 nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
1618 nir_deref_var *new_deref)
1619 {
1620 if (*deref)
1621 visit_deref_src(*deref, remove_use_cb, NULL);
1622
1623 *deref = new_deref;
1624
1625 if (*deref)
1626 visit_deref_src(*deref, add_use_cb, instr);
1627 }
1628
1629 /* note: does *not* take ownership of 'name' */
1630 void
1631 nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
1632 unsigned num_components,
1633 unsigned bit_size, const char *name)
1634 {
1635 def->name = ralloc_strdup(instr, name);
1636 def->parent_instr = instr;
1637 list_inithead(&def->uses);
1638 list_inithead(&def->if_uses);
1639 def->num_components = num_components;
1640 def->bit_size = bit_size;
1641
1642 if (instr->block) {
1643 nir_function_impl *impl =
1644 nir_cf_node_get_function(&instr->block->cf_node);
1645
1646 def->index = impl->ssa_alloc++;
1647 } else {
1648 def->index = UINT_MAX;
1649 }
1650 }
1651
1652 /* note: does *not* take ownership of 'name' */
1653 void
1654 nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
1655 unsigned num_components, unsigned bit_size,
1656 const char *name)
1657 {
1658 dest->is_ssa = true;
1659 nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
1660 }
1661
1662 void
1663 nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
1664 {
1665 assert(!new_src.is_ssa || def != new_src.ssa);
1666
1667 nir_foreach_use_safe(use_src, def)
1668 nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
1669
1670 nir_foreach_if_use_safe(use_src, def)
1671 nir_if_rewrite_condition(use_src->parent_if, new_src);
1672 }
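/* Usage sketch: the replace-and-kill sequence most optimization passes
 * use. Assumes `repl` is not produced by an instruction that itself uses
 * `def`, since the uses of `def` are rewritten wholesale.
 */
static void
replace_def_example(nir_ssa_def *def, nir_ssa_def *repl)
{
   nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(repl));
   nir_instr_remove(def->parent_instr);
}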
1673
1674 static bool
1675 is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
1676 {
1677 assert(start->block == end->block);
1678
1679 if (between->block != start->block)
1680 return false;
1681
1682 /* Search backwards looking for "between" */
1683 while (start != end) {
1684 if (between == end)
1685 return true;
1686
1687 end = nir_instr_prev(end);
1688 assert(end);
1689 }
1690
1691 return false;
1692 }
1693
1694 /* Replaces all uses of the given SSA def with the given source but only if
1695 * the use comes after the after_me instruction. This can be useful if you
1696 * are emitting code to fix up the result of some instruction: you can freely
1697 * use the result in that code and then call rewrite_uses_after and pass the
1698 * last fixup instruction as after_me and it will replace all of the uses you
1699 * want without touching the fixup code.
1700 *
1701 * This function assumes that after_me is in the same block as
1702 * def->parent_instr and that after_me comes after def->parent_instr.
1703 */
1704 void
1705 nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
1706 nir_instr *after_me)
1707 {
1708 assert(!new_src.is_ssa || def != new_src.ssa);
1709
1710 nir_foreach_use_safe(use_src, def) {
1711 assert(use_src->parent_instr != def->parent_instr);
1712 /* Since def already dominates all of its uses, the only way a use can
1713 * not be dominated by after_me is if it is between def and after_me in
1714 * the instruction list.
1715 */
1716 if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
1717 nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
1718 }
1719
1720 nir_foreach_if_use_safe(use_src, def)
1721 nir_if_rewrite_condition(use_src->parent_if, new_src);
1722 }
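/* Usage sketch of the fixup pattern described above: emit an fneg right
 * after the instruction producing `def`, then point every later use at
 * the negated value without disturbing the fneg itself.
 */
static void
negate_def_example(nir_shader *shader, nir_ssa_def *def)
{
   nir_alu_instr *neg = nir_alu_instr_create(shader, nir_op_fneg);
   neg->src[0].src = nir_src_for_ssa(def);
   nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                     def->num_components, def->bit_size, NULL);
   neg->dest.write_mask = (1 << def->num_components) - 1;
   nir_instr_insert(nir_after_instr(def->parent_instr), &neg->instr);

   nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(&neg->dest.dest.ssa),
                                  &neg->instr);
}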
1723
1724 uint8_t
1725 nir_ssa_def_components_read(const nir_ssa_def *def)
1726 {
1727 uint8_t read_mask = 0;
1728 nir_foreach_use(use, def) {
1729 if (use->parent_instr->type == nir_instr_type_alu) {
1730 nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
1731 nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
1732 int src_idx = alu_src - &alu->src[0];
1733 assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
1734
1735 for (unsigned c = 0; c < 4; c++) {
1736 if (!nir_alu_instr_channel_used(alu, src_idx, c))
1737 continue;
1738
1739 read_mask |= (1 << alu_src->swizzle[c]);
1740 }
1741 } else {
1742 return (1 << def->num_components) - 1;
1743 }
1744 }
1745
1746 return read_mask;
1747 }
1748
1749 nir_block *
1750 nir_block_cf_tree_next(nir_block *block)
1751 {
1752 if (block == NULL) {
1753 /* nir_foreach_block_safe() will call this function on a NULL block
1754 * after the last iteration, but it won't use the result so just return
1755 * NULL here.
1756 */
1757 return NULL;
1758 }
1759
1760 nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
1761 if (cf_next)
1762 return nir_cf_node_cf_tree_first(cf_next);
1763
1764 nir_cf_node *parent = block->cf_node.parent;
1765
1766 switch (parent->type) {
1767 case nir_cf_node_if: {
1768 /* Are we at the end of the if? Go to the beginning of the else */
1769 nir_if *if_stmt = nir_cf_node_as_if(parent);
1770 if (block == nir_if_last_then_block(if_stmt))
1771 return nir_if_first_else_block(if_stmt);
1772
1773 assert(block == nir_if_last_else_block(if_stmt));
1774 /* fall through */
1775 }
1776
1777 case nir_cf_node_loop:
1778 return nir_cf_node_as_block(nir_cf_node_next(parent));
1779
1780 case nir_cf_node_function:
1781 return NULL;
1782
1783 default:
1784 unreachable("unknown cf node type");
1785 }
1786 }
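/* Usage sketch: a forward, source-order walk over every block in an impl;
 * this is exactly the iteration that the nir_foreach_block() macro expands
 * to.
 */
static unsigned
count_blocks_example(nir_function_impl *impl)
{
   unsigned count = 0;
   for (nir_block *block = nir_start_block(impl); block != NULL;
        block = nir_block_cf_tree_next(block))
      count++;
   return count;
}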
1787
1788 nir_block *
1789 nir_block_cf_tree_prev(nir_block *block)
1790 {
1791 if (block == NULL) {
1792 /* do this for consistency with nir_block_cf_tree_next() */
1793 return NULL;
1794 }
1795
1796 nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
1797 if (cf_prev)
1798 return nir_cf_node_cf_tree_last(cf_prev);
1799
1800 nir_cf_node *parent = block->cf_node.parent;
1801
1802 switch (parent->type) {
1803 case nir_cf_node_if: {
1804 /* Are we at the beginning of the else? Go to the end of the if */
1805 nir_if *if_stmt = nir_cf_node_as_if(parent);
1806 if (block == nir_if_first_else_block(if_stmt))
1807 return nir_if_last_then_block(if_stmt);
1808
1809 assert(block == nir_if_first_then_block(if_stmt));
1810 /* fall through */
1811 }
1812
1813 case nir_cf_node_loop:
1814 return nir_cf_node_as_block(nir_cf_node_prev(parent));
1815
1816 case nir_cf_node_function:
1817 return NULL;
1818
1819 default:
1820 unreachable("unknown cf node type");
1821 }
1822 }
1823
1824 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
1825 {
1826 switch (node->type) {
1827 case nir_cf_node_function: {
1828 nir_function_impl *impl = nir_cf_node_as_function(node);
1829 return nir_start_block(impl);
1830 }
1831
1832 case nir_cf_node_if: {
1833 nir_if *if_stmt = nir_cf_node_as_if(node);
1834 return nir_if_first_then_block(if_stmt);
1835 }
1836
1837 case nir_cf_node_loop: {
1838 nir_loop *loop = nir_cf_node_as_loop(node);
1839 return nir_loop_first_block(loop);
1840 }
1841
1842 case nir_cf_node_block: {
1843 return nir_cf_node_as_block(node);
1844 }
1845
1846 default:
1847 unreachable("unknown node type");
1848 }
1849 }
1850
1851 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
1852 {
1853 switch (node->type) {
1854 case nir_cf_node_function: {
1855 nir_function_impl *impl = nir_cf_node_as_function(node);
1856 return nir_impl_last_block(impl);
1857 }
1858
1859 case nir_cf_node_if: {
1860 nir_if *if_stmt = nir_cf_node_as_if(node);
1861 return nir_if_last_else_block(if_stmt);
1862 }
1863
1864 case nir_cf_node_loop: {
1865 nir_loop *loop = nir_cf_node_as_loop(node);
1866 return nir_loop_last_block(loop);
1867 }
1868
1869 case nir_cf_node_block: {
1870 return nir_cf_node_as_block(node);
1871 }
1872
1873 default:
1874 unreachable("unknown node type");
1875 }
1876 }
1877
1878 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
1879 {
1880 if (node->type == nir_cf_node_block)
1881 return nir_block_cf_tree_next(nir_cf_node_as_block(node));
1882 else if (node->type == nir_cf_node_function)
1883 return NULL;
1884 else
1885 return nir_cf_node_as_block(nir_cf_node_next(node));
1886 }
1887
1888 nir_if *
1889 nir_block_get_following_if(nir_block *block)
1890 {
1891 if (exec_node_is_tail_sentinel(&block->cf_node.node))
1892 return NULL;
1893
1894 if (nir_cf_node_is_last(&block->cf_node))
1895 return NULL;
1896
1897 nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);
1898
1899 if (next_node->type != nir_cf_node_if)
1900 return NULL;
1901
1902 return nir_cf_node_as_if(next_node);
1903 }
1904
1905 nir_loop *
1906 nir_block_get_following_loop(nir_block *block)
1907 {
1908 if (exec_node_is_tail_sentinel(&block->cf_node.node))
1909 return NULL;
1910
1911 if (nir_cf_node_is_last(&block->cf_node))
1912 return NULL;
1913
1914 nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);
1915
1916 if (next_node->type != nir_cf_node_loop)
1917 return NULL;
1918
1919 return nir_cf_node_as_loop(next_node);
1920 }
1921
1922 void
1923 nir_index_blocks(nir_function_impl *impl)
1924 {
1925 unsigned index = 0;
1926
1927 if (impl->valid_metadata & nir_metadata_block_index)
1928 return;
1929
1930 nir_foreach_block(block, impl) {
1931 block->index = index++;
1932 }
1933
1934 impl->num_blocks = index;
1935 }
1936
1937 static bool
1938 index_ssa_def_cb(nir_ssa_def *def, void *state)
1939 {
1940 unsigned *index = (unsigned *) state;
1941 def->index = (*index)++;
1942
1943 return true;
1944 }
1945
1946 /**
1947 * The indices are applied top-to-bottom which has the very nice property
1948 * that, if A dominates B, then A->index <= B->index.
1949 */
1950 void
1951 nir_index_ssa_defs(nir_function_impl *impl)
1952 {
1953 unsigned index = 0;
1954
1955 nir_foreach_block(block, impl) {
1956 nir_foreach_instr(instr, block)
1957 nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
1958 }
1959
1960 impl->ssa_alloc = index;
1961 }
1962
1963 /**
1964 * The indices are applied top-to-bottom which has the very nice property
1965 * that, if A dominates B, then A->index <= B->index.
1966 */
1967 unsigned
1968 nir_index_instrs(nir_function_impl *impl)
1969 {
1970 unsigned index = 0;
1971
1972 nir_foreach_block(block, impl) {
1973 nir_foreach_instr(instr, block)
1974 instr->index = index++;
1975 }
1976
1977 return index;
1978 }
1979
1980 nir_intrinsic_op
1981 nir_intrinsic_from_system_value(gl_system_value val)
1982 {
1983 switch (val) {
1984 case SYSTEM_VALUE_VERTEX_ID:
1985 return nir_intrinsic_load_vertex_id;
1986 case SYSTEM_VALUE_INSTANCE_ID:
1987 return nir_intrinsic_load_instance_id;
1988 case SYSTEM_VALUE_DRAW_ID:
1989 return nir_intrinsic_load_draw_id;
1990 case SYSTEM_VALUE_BASE_INSTANCE:
1991 return nir_intrinsic_load_base_instance;
1992 case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
1993 return nir_intrinsic_load_vertex_id_zero_base;
1994 case SYSTEM_VALUE_IS_INDEXED_DRAW:
1995 return nir_intrinsic_load_is_indexed_draw;
1996 case SYSTEM_VALUE_FIRST_VERTEX:
1997 return nir_intrinsic_load_first_vertex;
1998 case SYSTEM_VALUE_BASE_VERTEX:
1999 return nir_intrinsic_load_base_vertex;
2000 case SYSTEM_VALUE_INVOCATION_ID:
2001 return nir_intrinsic_load_invocation_id;
2002 case SYSTEM_VALUE_FRAG_COORD:
2003 return nir_intrinsic_load_frag_coord;
2004 case SYSTEM_VALUE_FRONT_FACE:
2005 return nir_intrinsic_load_front_face;
2006 case SYSTEM_VALUE_SAMPLE_ID:
2007 return nir_intrinsic_load_sample_id;
2008 case SYSTEM_VALUE_SAMPLE_POS:
2009 return nir_intrinsic_load_sample_pos;
2010 case SYSTEM_VALUE_SAMPLE_MASK_IN:
2011 return nir_intrinsic_load_sample_mask_in;
2012 case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
2013 return nir_intrinsic_load_local_invocation_id;
2014 case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
2015 return nir_intrinsic_load_local_invocation_index;
2016 case SYSTEM_VALUE_WORK_GROUP_ID:
2017 return nir_intrinsic_load_work_group_id;
2018 case SYSTEM_VALUE_NUM_WORK_GROUPS:
2019 return nir_intrinsic_load_num_work_groups;
2020 case SYSTEM_VALUE_PRIMITIVE_ID:
2021 return nir_intrinsic_load_primitive_id;
2022 case SYSTEM_VALUE_TESS_COORD:
2023 return nir_intrinsic_load_tess_coord;
2024 case SYSTEM_VALUE_TESS_LEVEL_OUTER:
2025 return nir_intrinsic_load_tess_level_outer;
2026 case SYSTEM_VALUE_TESS_LEVEL_INNER:
2027 return nir_intrinsic_load_tess_level_inner;
2028 case SYSTEM_VALUE_VERTICES_IN:
2029 return nir_intrinsic_load_patch_vertices_in;
2030 case SYSTEM_VALUE_HELPER_INVOCATION:
2031 return nir_intrinsic_load_helper_invocation;
2032 case SYSTEM_VALUE_VIEW_INDEX:
2033 return nir_intrinsic_load_view_index;
2034 case SYSTEM_VALUE_SUBGROUP_SIZE:
2035 return nir_intrinsic_load_subgroup_size;
2036 case SYSTEM_VALUE_SUBGROUP_INVOCATION:
2037 return nir_intrinsic_load_subgroup_invocation;
2038 case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
2039 return nir_intrinsic_load_subgroup_eq_mask;
2040 case SYSTEM_VALUE_SUBGROUP_GE_MASK:
2041 return nir_intrinsic_load_subgroup_ge_mask;
2042 case SYSTEM_VALUE_SUBGROUP_GT_MASK:
2043 return nir_intrinsic_load_subgroup_gt_mask;
2044 case SYSTEM_VALUE_SUBGROUP_LE_MASK:
2045 return nir_intrinsic_load_subgroup_le_mask;
2046 case SYSTEM_VALUE_SUBGROUP_LT_MASK:
2047 return nir_intrinsic_load_subgroup_lt_mask;
2048 case SYSTEM_VALUE_NUM_SUBGROUPS:
2049 return nir_intrinsic_load_num_subgroups;
2050 case SYSTEM_VALUE_SUBGROUP_ID:
2051 return nir_intrinsic_load_subgroup_id;
2052 case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
2053 return nir_intrinsic_load_local_group_size;
2054 default:
2055 unreachable("system value does not directly correspond to intrinsic");
2056 }
2057 }
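/* Usage sketch: this mapping and nir_system_value_from_intrinsic() below
 * are inverses for every value they both handle, which lowering passes
 * rely on when converting system-value variables to intrinsics and back.
 */
static void
system_value_round_trip_example(void)
{
   nir_intrinsic_op op =
      nir_intrinsic_from_system_value(SYSTEM_VALUE_FRONT_FACE);
   assert(nir_system_value_from_intrinsic(op) == SYSTEM_VALUE_FRONT_FACE);
   (void) op;
}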
2058
2059 gl_system_value
2060 nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
2061 {
2062 switch (intrin) {
2063 case nir_intrinsic_load_vertex_id:
2064 return SYSTEM_VALUE_VERTEX_ID;
2065 case nir_intrinsic_load_instance_id:
2066 return SYSTEM_VALUE_INSTANCE_ID;
2067 case nir_intrinsic_load_draw_id:
2068 return SYSTEM_VALUE_DRAW_ID;
2069 case nir_intrinsic_load_base_instance:
2070 return SYSTEM_VALUE_BASE_INSTANCE;
2071 case nir_intrinsic_load_vertex_id_zero_base:
2072 return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
2073 case nir_intrinsic_load_first_vertex:
2074 return SYSTEM_VALUE_FIRST_VERTEX;
2075 case nir_intrinsic_load_is_indexed_draw:
2076 return SYSTEM_VALUE_IS_INDEXED_DRAW;
2077 case nir_intrinsic_load_base_vertex:
2078 return SYSTEM_VALUE_BASE_VERTEX;
2079 case nir_intrinsic_load_invocation_id:
2080 return SYSTEM_VALUE_INVOCATION_ID;
2081 case nir_intrinsic_load_frag_coord:
2082 return SYSTEM_VALUE_FRAG_COORD;
2083 case nir_intrinsic_load_front_face:
2084 return SYSTEM_VALUE_FRONT_FACE;
2085 case nir_intrinsic_load_sample_id:
2086 return SYSTEM_VALUE_SAMPLE_ID;
2087 case nir_intrinsic_load_sample_pos:
2088 return SYSTEM_VALUE_SAMPLE_POS;
2089 case nir_intrinsic_load_sample_mask_in:
2090 return SYSTEM_VALUE_SAMPLE_MASK_IN;
2091 case nir_intrinsic_load_local_invocation_id:
2092 return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
2093 case nir_intrinsic_load_local_invocation_index:
2094 return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
2095 case nir_intrinsic_load_num_work_groups:
2096 return SYSTEM_VALUE_NUM_WORK_GROUPS;
2097 case nir_intrinsic_load_work_group_id:
2098 return SYSTEM_VALUE_WORK_GROUP_ID;
2099 case nir_intrinsic_load_primitive_id:
2100 return SYSTEM_VALUE_PRIMITIVE_ID;
2101 case nir_intrinsic_load_tess_coord:
2102 return SYSTEM_VALUE_TESS_COORD;
2103 case nir_intrinsic_load_tess_level_outer:
2104 return SYSTEM_VALUE_TESS_LEVEL_OUTER;
2105 case nir_intrinsic_load_tess_level_inner:
2106 return SYSTEM_VALUE_TESS_LEVEL_INNER;
2107 case nir_intrinsic_load_patch_vertices_in:
2108 return SYSTEM_VALUE_VERTICES_IN;
2109 case nir_intrinsic_load_helper_invocation:
2110 return SYSTEM_VALUE_HELPER_INVOCATION;
2111 case nir_intrinsic_load_view_index:
2112 return SYSTEM_VALUE_VIEW_INDEX;
2113 case nir_intrinsic_load_subgroup_size:
2114 return SYSTEM_VALUE_SUBGROUP_SIZE;
2115 case nir_intrinsic_load_subgroup_invocation:
2116 return SYSTEM_VALUE_SUBGROUP_INVOCATION;
2117 case nir_intrinsic_load_subgroup_eq_mask:
2118 return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
2119 case nir_intrinsic_load_subgroup_ge_mask:
2120 return SYSTEM_VALUE_SUBGROUP_GE_MASK;
2121 case nir_intrinsic_load_subgroup_gt_mask:
2122 return SYSTEM_VALUE_SUBGROUP_GT_MASK;
2123 case nir_intrinsic_load_subgroup_le_mask:
2124 return SYSTEM_VALUE_SUBGROUP_LE_MASK;
2125 case nir_intrinsic_load_subgroup_lt_mask:
2126 return SYSTEM_VALUE_SUBGROUP_LT_MASK;
2127 case nir_intrinsic_load_num_subgroups:
2128 return SYSTEM_VALUE_NUM_SUBGROUPS;
2129 case nir_intrinsic_load_subgroup_id:
2130 return SYSTEM_VALUE_SUBGROUP_ID;
2131 case nir_intrinsic_load_local_group_size:
2132 return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
2133 default:
2134 unreachable("intrinsic doesn't produce a system value");
2135 }
2136 }