lima/ppir: add support for unconditional branches and condition negation
src/gallium/drivers/lima/ir/pp/nir.c
/*
 * Copyright (c) 2017 Lima Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#include <string.h>

#include "util/ralloc.h"
#include "util/bitscan.h"
#include "compiler/nir/nir.h"
#include "pipe/p_state.h"


#include "ppir.h"

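/* Create a node whose destination is a NIR SSA def. Load/store results are
 * flagged as register "heads" so the register allocator places them at the
 * start of a physical register. */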
static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_ssa_def *ssa)
{
   ppir_node *node = ppir_node_create(block, op, ssa->index, 0);
   if (!node)
      return NULL;

   ppir_dest *dest = ppir_node_get_dest(node);
   dest->type = ppir_target_ssa;
   dest->ssa.num_components = ssa->num_components;
   dest->ssa.live_in = INT_MAX;
   dest->ssa.live_out = 0;
   dest->write_mask = u_bit_consecutive(0, ssa->num_components);

   if (node->type == ppir_node_type_load ||
       node->type == ppir_node_type_store)
      dest->ssa.is_head = true;

   return node;
}

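/* Create a node that writes a NIR register: look up the ppir_reg that
 * ppir_compile_nir created for it and record the write mask. */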
static void *ppir_node_create_reg(ppir_block *block, ppir_op op,
                                  nir_reg_dest *reg, unsigned mask)
{
   ppir_node *node = ppir_node_create(block, op, reg->reg->index, mask);
   if (!node)
      return NULL;

   ppir_dest *dest = ppir_node_get_dest(node);

   list_for_each_entry(ppir_reg, r, &block->comp->reg_list, list) {
      if (r->index == reg->reg->index) {
         dest->reg = r;
         break;
      }
   }

   dest->type = ppir_target_register;
   dest->write_mask = mask;

   if (node->type == ppir_node_type_load ||
       node->type == ppir_node_type_store)
      dest->reg->is_head = true;

   return node;
}

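/* Create a node for an optional NIR dest: SSA defs and registers have
 * dedicated helpers; a NULL dest (e.g. a store) yields a node with no
 * destination. */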
static void *ppir_node_create_dest(ppir_block *block, ppir_op op,
                                   nir_dest *dest, unsigned mask)
{
   unsigned index = -1;

   if (dest) {
      if (dest->is_ssa)
         return ppir_node_create_ssa(block, op, &dest->ssa);
      else
         return ppir_node_create_reg(block, op, &dest->reg, mask);
   }

   return ppir_node_create(block, op, index, 0);
}

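/* Wire up a source: point ps at the node producing ns and add the dependency
 * edges. Constants are cloned per use; uniform, varying and texture loads are
 * cloned per block so every block keeps a local copy. For NIR registers,
 * depend on the writer of each component selected by mask and swizzle. */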
static void ppir_node_add_src(ppir_compiler *comp, ppir_node *node,
                              ppir_src *ps, nir_src *ns, unsigned mask)
{
   ppir_node *child = NULL;

   if (ns->is_ssa) {
      child = comp->var_nodes[ns->ssa->index];
      /* Clone consts for each successor */
      switch (child->op) {
      case ppir_op_const:
         child = ppir_node_clone(node->block, child);
         break;
      /* Clone uniforms and load textures for each block */
      case ppir_op_load_texture:
      case ppir_op_load_uniform:
      case ppir_op_load_varying:
         if (child->block != node->block) {
            child = ppir_node_clone(node->block, child);
            comp->var_nodes[ns->ssa->index] = child;
         }
         break;
      default:
         break;
      }

      ppir_node_add_dep(node, child);
   }
   else {
      nir_register *reg = ns->reg.reg;
      while (mask) {
         int swizzle = ps->swizzle[u_bit_scan(&mask)];
         child = comp->var_nodes[(reg->index << 2) + comp->reg_base + swizzle];
         ppir_node_add_dep(node, child);
      }
   }

   ppir_node_target_assign(ps, child);
}

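/* NIR ALU opcode -> PPIR op translation table; entries left at -1 are
 * unsupported and make ppir_emit_alu bail out. */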
static int nir_to_ppir_opcodes[nir_num_opcodes] = {
   /* not supported */
   [0 ... nir_last_opcode] = -1,

   [nir_op_mov] = ppir_op_mov,
   [nir_op_fmul] = ppir_op_mul,
   [nir_op_fabs] = ppir_op_abs,
   [nir_op_fneg] = ppir_op_neg,
   [nir_op_fadd] = ppir_op_add,
   [nir_op_fsum3] = ppir_op_sum3,
   [nir_op_fsum4] = ppir_op_sum4,
   [nir_op_frsq] = ppir_op_rsqrt,
   [nir_op_flog2] = ppir_op_log2,
   [nir_op_fexp2] = ppir_op_exp2,
   [nir_op_fsqrt] = ppir_op_sqrt,
   [nir_op_fsin] = ppir_op_sin,
   [nir_op_fcos] = ppir_op_cos,
   [nir_op_fmax] = ppir_op_max,
   [nir_op_fmin] = ppir_op_min,
   [nir_op_frcp] = ppir_op_rcp,
   [nir_op_ffloor] = ppir_op_floor,
   [nir_op_fceil] = ppir_op_ceil,
   [nir_op_ffract] = ppir_op_fract,
   [nir_op_sge] = ppir_op_ge,
   [nir_op_fge] = ppir_op_ge,
   [nir_op_slt] = ppir_op_lt,
   [nir_op_flt] = ppir_op_lt,
   [nir_op_seq] = ppir_op_eq,
   [nir_op_feq] = ppir_op_eq,
   [nir_op_sne] = ppir_op_ne,
   [nir_op_fne] = ppir_op_ne,
   [nir_op_fcsel] = ppir_op_select,
   [nir_op_inot] = ppir_op_not,
   [nir_op_ftrunc] = ppir_op_trunc,
   [nir_op_fsat] = ppir_op_sat,
   [nir_op_fddx] = ppir_op_ddx,
   [nir_op_fddy] = ppir_op_ddy,
};

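/* Translate a NIR ALU instruction. sum3/sum4 read more components than they
 * write, so their sources are fetched with a widened mask; all other ops
 * reuse the destination write mask. */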
static ppir_node *ppir_emit_alu(ppir_block *block, nir_instr *ni)
{
   nir_alu_instr *instr = nir_instr_as_alu(ni);
   int op = nir_to_ppir_opcodes[instr->op];

   if (op < 0) {
      ppir_error("unsupported nir_op: %s\n", nir_op_infos[instr->op].name);
      return NULL;
   }

   ppir_alu_node *node = ppir_node_create_dest(block, op, &instr->dest.dest,
                                               instr->dest.write_mask);
   if (!node)
      return NULL;

   ppir_dest *pd = &node->dest;
   nir_alu_dest *nd = &instr->dest;
   if (nd->saturate)
      pd->modifier = ppir_outmod_clamp_fraction;

   unsigned src_mask;
   switch (op) {
   case ppir_op_sum3:
      src_mask = 0b0111;
      break;
   case ppir_op_sum4:
      src_mask = 0b1111;
      break;
   default:
      src_mask = pd->write_mask;
      break;
   }

   unsigned num_child = nir_op_infos[instr->op].num_inputs;
   node->num_src = num_child;

   for (int i = 0; i < num_child; i++) {
      nir_alu_src *ns = instr->src + i;
      ppir_src *ps = node->src + i;
      memcpy(ps->swizzle, ns->swizzle, sizeof(ps->swizzle));
      ppir_node_add_src(block->comp, &node->node, ps, &ns->src, src_mask);

      ps->absolute = ns->abs;
      ps->negate = ns->negate;
   }

   return &node->node;
}

static ppir_block *ppir_block_create(ppir_compiler *comp);

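/* discard_if is implemented as a branch to a dedicated block holding a single
 * discard node; create that block lazily on first use (ppir_compile_nir
 * appends it after all other blocks). */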
static bool ppir_emit_discard_block(ppir_compiler *comp)
{
   ppir_block *block = ppir_block_create(comp);
   ppir_discard_node *discard;
   if (!block)
      return false;

   comp->discard_block = block;
   block->comp = comp;

   discard = ppir_node_create(block, ppir_op_discard, -1, 0);
   if (discard)
      list_addtail(&discard->node.list, &block->node_list);
   else
      return false;

   return true;
}

static ppir_node *ppir_emit_discard_if(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   ppir_node *node;
   ppir_compiler *comp = block->comp;
   ppir_branch_node *branch;

   if (!comp->discard_block && !ppir_emit_discard_block(comp))
      return NULL;

   node = ppir_node_create(block, ppir_op_branch, -1, 0);
   if (!node)
      return NULL;
   branch = ppir_node_to_branch(node);

   /* second src and condition will be updated during lowering */
   ppir_node_add_src(block->comp, node, &branch->src[0],
                     &instr->src[0], u_bit_consecutive(0, instr->num_components));
   branch->num_src = 1;
   branch->target = comp->discard_block;

   return node;
}

static ppir_node *ppir_emit_discard(ppir_block *block, nir_instr *ni)
{
   ppir_node *node = ppir_node_create(block, ppir_op_discard, -1, 0);

   return node;
}

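/* Translate load/store intrinsics into PPIR load/store nodes. Non-SSA
 * destinations get an explicit mask covering all written components;
 * discard and discard_if are handled by the helpers above. */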
static ppir_node *ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   unsigned mask = 0;
   ppir_load_node *lnode;
   ppir_store_node *snode;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_varying, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      lnode->index = nir_intrinsic_base(instr) * 4 + nir_intrinsic_component(instr);
      return &lnode->node;

   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      ppir_op op;
      switch (instr->intrinsic) {
      case nir_intrinsic_load_frag_coord:
         op = ppir_op_load_fragcoord;
         break;
      case nir_intrinsic_load_point_coord:
         op = ppir_op_load_pointcoord;
         break;
      case nir_intrinsic_load_front_face:
         op = ppir_op_load_frontface;
         break;
      default:
         assert(0);
         break;
      }

      lnode = ppir_node_create_dest(block, op, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      return &lnode->node;

   case nir_intrinsic_load_uniform:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      lnode->index = nir_intrinsic_base(instr);
      lnode->index += (uint32_t)nir_src_as_float(instr->src[0]);

      return &lnode->node;

   case nir_intrinsic_store_output:
      snode = ppir_node_create_dest(block, ppir_op_store_color, NULL, 0);
      if (!snode)
         return NULL;

      snode->index = nir_intrinsic_base(instr);

      for (int i = 0; i < instr->num_components; i++)
         snode->src.swizzle[i] = i;

      ppir_node_add_src(block->comp, &snode->node, &snode->src, instr->src,
                        u_bit_consecutive(0, instr->num_components));

      return &snode->node;

   case nir_intrinsic_discard:
      return ppir_emit_discard(block, ni);

   case nir_intrinsic_discard_if:
      return ppir_emit_discard_if(block, ni);

   default:
      ppir_error("unsupported nir_intrinsic_instr %s\n",
                 nir_intrinsic_infos[instr->intrinsic].name);
      return NULL;
   }
}

static ppir_node *ppir_emit_load_const(ppir_block *block, nir_instr *ni)
{
   nir_load_const_instr *instr = nir_instr_as_load_const(ni);
   ppir_const_node *node = ppir_node_create_ssa(block, ppir_op_const, &instr->def);
   if (!node)
      return NULL;

   assert(instr->def.bit_size == 32);

   for (int i = 0; i < instr->def.num_components; i++)
      node->constant.value[i].i = instr->value[i].i32;
   node->constant.num = instr->def.num_components;

   return &node->node;
}

static ppir_node *ppir_emit_ssa_undef(ppir_block *block, nir_instr *ni)
{
   ppir_error("nir_ssa_undef_instr is not supported\n");
   return NULL;
}

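/* Only plain nir_texop_tex on 2D-like sampler dims is supported; the
 * coordinate source is hooked up with an identity swizzle. */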
static ppir_node *ppir_emit_tex(ppir_block *block, nir_instr *ni)
{
   nir_tex_instr *instr = nir_instr_as_tex(ni);
   ppir_load_texture_node *node;

   if (instr->op != nir_texop_tex) {
      ppir_error("unsupported texop %d\n", instr->op);
      return NULL;
   }

   node = ppir_node_create_dest(block, ppir_op_load_texture, &instr->dest, 0);
   if (!node)
      return NULL;

   node->sampler = instr->texture_index;

   switch (instr->sampler_dim) {
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      break;
   default:
      ppir_error("unsupported sampler dim: %d\n", instr->sampler_dim);
      return NULL;
   }

   node->sampler_dim = instr->sampler_dim;

   for (int i = 0; i < instr->coord_components; i++)
      node->src_coords.swizzle[i] = i;

   for (int i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord:
         ppir_node_add_src(block->comp, &node->node, &node->src_coords,
                           &instr->src[i].src,
                           u_bit_consecutive(0, instr->coord_components));
         break;
      default:
         ppir_error("unsupported texture source type\n");
         assert(0);
         return NULL;
      }
   }

   return &node->node;
}

static ppir_node *ppir_emit_jump(ppir_block *block, nir_instr *ni)
{
   ppir_error("nir_jump_instr is not supported\n");
   return NULL;
}

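/* Per-instruction-type emit handlers. The table only covers types below
 * nir_instr_type_phi; ppir_emit_block asserts that nothing else shows up,
 * since control flow is not supported yet. */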
static ppir_node *(*ppir_emit_instr[nir_instr_type_phi])(ppir_block *, nir_instr *) = {
   [nir_instr_type_alu] = ppir_emit_alu,
   [nir_instr_type_intrinsic] = ppir_emit_intrinsic,
   [nir_instr_type_load_const] = ppir_emit_load_const,
   [nir_instr_type_ssa_undef] = ppir_emit_ssa_undef,
   [nir_instr_type_tex] = ppir_emit_tex,
   [nir_instr_type_jump] = ppir_emit_jump,
};

static ppir_block *ppir_block_create(ppir_compiler *comp)
{
   ppir_block *block = rzalloc(comp, ppir_block);
   if (!block)
      return NULL;

   list_inithead(&block->node_list);
   list_inithead(&block->instr_list);

   return block;
}

static bool ppir_emit_block(ppir_compiler *comp, nir_block *nblock)
{
   ppir_block *block = ppir_block_create(comp);
   if (!block)
      return false;

   list_addtail(&block->list, &comp->block_list);
   block->comp = comp;

   nir_foreach_instr(instr, nblock) {
      assert(instr->type < nir_instr_type_phi);
      ppir_node *node = ppir_emit_instr[instr->type](block, instr);
      if (!node)
         return false;

      list_addtail(&node->list, &block->node_list);
   }

   return true;
}

static bool ppir_emit_if(ppir_compiler *comp, nir_if *nif)
{
   ppir_error("if nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_loop(ppir_compiler *comp, nir_loop *nloop)
{
   ppir_error("loop nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_function(ppir_compiler *comp, nir_function_impl *nfunc)
{
   ppir_error("function nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_cf_list(ppir_compiler *comp, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      bool ret;

      switch (node->type) {
      case nir_cf_node_block:
         ret = ppir_emit_block(comp, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         ret = ppir_emit_if(comp, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         ret = ppir_emit_loop(comp, nir_cf_node_as_loop(node));
         break;
      case nir_cf_node_function:
         ret = ppir_emit_function(comp, nir_cf_node_as_function(node));
         break;
      default:
         ppir_error("unknown NIR node type %d\n", node->type);
         return false;
      }

      if (!ret)
         return false;
   }

   return true;
}

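/* var_nodes is laid out as num_ssa slots for SSA defs followed by four slots
 * (one per vec4 component) for each NIR register; reg_base is the offset at
 * which the register slots begin. */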
static ppir_compiler *ppir_compiler_create(void *prog, unsigned num_reg, unsigned num_ssa)
{
   ppir_compiler *comp = rzalloc_size(
      prog, sizeof(*comp) + ((num_reg << 2) + num_ssa) * sizeof(ppir_node *));
   if (!comp)
      return NULL;

   list_inithead(&comp->block_list);
   list_inithead(&comp->reg_list);

   comp->var_nodes = (ppir_node **)(comp + 1);
   comp->reg_base = num_ssa;
   comp->prog = prog;
   return comp;
}

static void ppir_add_ordering_deps(ppir_compiler *comp)
{
   /* Some intrinsics do not have explicit dependencies and thus depend
    * on instruction order. Consider discard_if and store_output as an
    * example. If we don't add a fake dependency from store_output to
    * discard_if, the scheduler may put store_output first, and since
    * store_output terminates the shader on Utgard PP, the rest of it
    * will never be executed.
    * Add fake dependencies for discard/branch/store to preserve
    * instruction order.
    *
    * TODO: the scheduler should schedule discard_if as early as possible,
    * otherwise we may end up with suboptimal code for cases like this:
    *
    * s3 = s1 < s2
    * discard_if s3
    * s4 = s1 + s2
    * store s4
    *
    * In this case store depends on discard_if and s4, but since dependencies
    * can be scheduled in any order it can result in code like this:
    *
    * instr1: s3 = s1 < s2
    * instr2: s4 = s1 + s2
    * instr3: discard_if s3
    * instr4: store s4
    */
   list_for_each_entry(ppir_block, block, &comp->block_list, list) {
      ppir_node *prev_node = NULL;
      list_for_each_entry(ppir_node, node, &block->node_list, list) {
         if (node->type == ppir_node_type_discard ||
             node->type == ppir_node_type_store ||
             node->type == ppir_node_type_branch) {
            if (prev_node)
               ppir_node_add_dep(node, prev_node);
            prev_node = node;
         }
      }
   }
}

static void ppir_print_shader_db(struct nir_shader *nir, ppir_compiler *comp,
                                 struct pipe_debug_callback *debug)
{
   const struct shader_info *info = &nir->info;
   char *shaderdb;
   int ret = asprintf(&shaderdb,
                      "%s shader: %d inst, %d loops, %d:%d spills:fills\n",
                      gl_shader_stage_name(info->stage),
                      comp->cur_instr_index,
                      comp->num_loops,
                      comp->num_spills,
                      comp->num_fills);
   assert(ret >= 0);

   if (lima_debug & LIMA_DEBUG_SHADERDB)
      fprintf(stderr, "SHADER-DB: %s\n", shaderdb);

   pipe_debug_message(debug, SHADER_INFO, "%s", shaderdb);
   free(shaderdb);
}

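/* Compiler entry point: build per-register liveness state, translate the NIR
 * control-flow list into PPIR blocks, then run lowering, instruction
 * formation, scheduling, register allocation and code generation. */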
bool ppir_compile_nir(struct lima_fs_shader_state *prog, struct nir_shader *nir,
                      struct ra_regs *ra,
                      struct pipe_debug_callback *debug)
{
   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   ppir_compiler *comp = ppir_compiler_create(prog, func->reg_alloc, func->ssa_alloc);
   if (!comp)
      return false;

   comp->ra = ra;

   foreach_list_typed(nir_register, reg, node, &func->registers) {
      ppir_reg *r = rzalloc(comp, ppir_reg);
      if (!r)
         return false;

      r->index = reg->index;
      r->num_components = reg->num_components;
      r->live_in = INT_MAX;
      r->live_out = 0;
      r->is_head = false;
      list_addtail(&r->list, &comp->reg_list);
   }

   if (!ppir_emit_cf_list(comp, &func->body))
      goto err_out0;

   /* If we have a discard block, add it at the very end */
   if (comp->discard_block)
      list_addtail(&comp->discard_block->list, &comp->block_list);

   ppir_add_ordering_deps(comp);

   ppir_node_print_prog(comp);

   if (!ppir_lower_prog(comp))
      goto err_out0;

   ppir_node_print_prog(comp);

   if (!ppir_node_to_instr(comp))
      goto err_out0;

   if (!ppir_schedule_prog(comp))
      goto err_out0;

   if (!ppir_regalloc_prog(comp))
      goto err_out0;

   if (!ppir_codegen_prog(comp))
      goto err_out0;

   ppir_print_shader_db(nir, comp, debug);

   ralloc_free(comp);
   return true;

err_out0:
   ralloc_free(comp);
   return false;
}