lima/ppir: Add fddx and fddy
[mesa.git] / src / gallium / drivers / lima / ir / pp / nir.c
1 /*
2 * Copyright (c) 2017 Lima Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25 #include <string.h>
26
27 #include "util/ralloc.h"
28 #include "util/bitscan.h"
29 #include "compiler/nir/nir.h"
30 #include "pipe/p_state.h"
31
32
33 #include "ppir.h"
34
35 static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_ssa_def *ssa)
36 {
37 ppir_node *node = ppir_node_create(block, op, ssa->index, 0);
38 if (!node)
39 return NULL;
40
41 ppir_dest *dest = ppir_node_get_dest(node);
42 dest->type = ppir_target_ssa;
43 dest->ssa.num_components = ssa->num_components;
44 dest->ssa.live_in = INT_MAX;
45 dest->ssa.live_out = 0;
46 dest->write_mask = u_bit_consecutive(0, ssa->num_components);
47
48 if (node->type == ppir_node_type_load ||
49 node->type == ppir_node_type_store)
50 dest->ssa.is_head = true;
51
52 return node;
53 }
54
/* Create a ppir node whose destination is the NIR register "reg",
 * writing the components selected by "mask".  The matching ppir_reg
 * is looked up in the compiler's reg_list (populated up front in
 * ppir_compile_nir).  Returns NULL on allocation failure.
 */
static void *ppir_node_create_reg(ppir_block *block, ppir_op op,
                                  nir_reg_dest *reg, unsigned mask)
{
   ppir_node *node = ppir_node_create(block, op, reg->reg->index, mask);
   if (!node)
      return NULL;

   ppir_dest *dest = ppir_node_get_dest(node);

   /* find the ppir_reg previously created for this NIR register index.
    * NOTE(review): if no entry matches, dest->reg is left unset and the
    * is_head write below would dereference it — assumes every register
    * was added to reg_list before emission (see ppir_compile_nir).
    */
   list_for_each_entry(ppir_reg, r, &block->comp->reg_list, list) {
      if (r->index == reg->reg->index) {
         dest->reg = r;
         break;
      }
   }

   dest->type = ppir_target_register;
   dest->write_mask = mask;

   /* load/store nodes mark their destination register as a "head" */
   if (node->type == ppir_node_type_load ||
       node->type == ppir_node_type_store)
      dest->reg->is_head = true;

   return node;
}
80
81 static void *ppir_node_create_dest(ppir_block *block, ppir_op op,
82 nir_dest *dest, unsigned mask)
83 {
84 unsigned index = -1;
85
86 if (dest) {
87 if (dest->is_ssa)
88 return ppir_node_create_ssa(block, op, &dest->ssa);
89 else
90 return ppir_node_create_reg(block, op, &dest->reg, mask);
91 }
92
93 return ppir_node_create(block, op, index, 0);
94 }
95
/* Hook up one ppir source "ps" to the NIR source "ns": add dependency
 * edges from "node" to the node(s) that produce the value, then point
 * the ppir source at the producer's destination.
 */
static void ppir_node_add_src(ppir_compiler *comp, ppir_node *node,
                              ppir_src *ps, nir_src *ns, unsigned mask)
{
   ppir_node *child = NULL;

   if (ns->is_ssa) {
      /* SSA def: exactly one producer, recorded in var_nodes by index */
      child = comp->var_nodes[ns->ssa->index];
      ppir_node_add_dep(node, child);
   }
   else {
      /* register: each component may be written by a different node, so
       * depend on the writer of every component we actually read
       * (after applying the source swizzle)
       */
      nir_register *reg = ns->reg.reg;
      while (mask) {
         int swizzle = ps->swizzle[u_bit_scan(&mask)];
         child = comp->var_nodes[(reg->index << 2) + comp->reg_base + swizzle];
         ppir_node_add_dep(node, child);
      }
   }

   /* NOTE(review): in the register path with mask == 0, child stays NULL
    * and ppir_node_get_dest(NULL) would crash — assumes callers always
    * pass a non-empty mask.  The source is targeted at the last child's
    * destination.
    */
   ppir_dest *dest = ppir_node_get_dest(child);
   ppir_node_target_assign(ps, dest);
}
117
/* Map from NIR ALU opcode to ppir opcode.  Entries left at -1 are
 * unsupported and rejected in ppir_emit_alu.
 */
static int nir_to_ppir_opcodes[nir_num_opcodes] = {
   /* not supported */
   [0 ... nir_last_opcode] = -1,

   [nir_op_mov] = ppir_op_mov,
   [nir_op_fmul] = ppir_op_mul,
   [nir_op_fabs] = ppir_op_abs,
   [nir_op_fneg] = ppir_op_neg,
   [nir_op_fadd] = ppir_op_add,
   [nir_op_fsum3] = ppir_op_sum3,
   [nir_op_fsum4] = ppir_op_sum4,
   [nir_op_frsq] = ppir_op_rsqrt,
   [nir_op_flog2] = ppir_op_log2,
   [nir_op_fexp2] = ppir_op_exp2,
   [nir_op_fsqrt] = ppir_op_sqrt,
   [nir_op_fsin] = ppir_op_sin,
   [nir_op_fcos] = ppir_op_cos,
   [nir_op_fmax] = ppir_op_max,
   [nir_op_fmin] = ppir_op_min,
   [nir_op_frcp] = ppir_op_rcp,
   [nir_op_ffloor] = ppir_op_floor,
   [nir_op_fceil] = ppir_op_ceil,
   [nir_op_ffract] = ppir_op_fract,
   [nir_op_sge] = ppir_op_ge,
   [nir_op_fge] = ppir_op_ge,
   [nir_op_slt] = ppir_op_lt,
   [nir_op_flt] = ppir_op_lt,
   [nir_op_seq] = ppir_op_eq,
   [nir_op_feq] = ppir_op_eq,
   [nir_op_sne] = ppir_op_ne,
   [nir_op_fne] = ppir_op_ne,
   [nir_op_fcsel] = ppir_op_select,
   [nir_op_inot] = ppir_op_not,
   [nir_op_ftrunc] = ppir_op_trunc,
   [nir_op_fsat] = ppir_op_sat,
   [nir_op_fddx] = ppir_op_ddx,
   [nir_op_fddy] = ppir_op_ddy,
};
156
157 static ppir_node *ppir_emit_alu(ppir_block *block, nir_instr *ni)
158 {
159 nir_alu_instr *instr = nir_instr_as_alu(ni);
160 int op = nir_to_ppir_opcodes[instr->op];
161
162 if (op < 0) {
163 ppir_error("unsupported nir_op: %s\n", nir_op_infos[instr->op].name);
164 return NULL;
165 }
166
167 ppir_alu_node *node = ppir_node_create_dest(block, op, &instr->dest.dest,
168 instr->dest.write_mask);
169 if (!node)
170 return NULL;
171
172 ppir_dest *pd = &node->dest;
173 nir_alu_dest *nd = &instr->dest;
174 if (nd->saturate)
175 pd->modifier = ppir_outmod_clamp_fraction;
176
177 unsigned src_mask;
178 switch (op) {
179 case ppir_op_sum3:
180 src_mask = 0b0111;
181 break;
182 case ppir_op_sum4:
183 src_mask = 0b1111;
184 break;
185 default:
186 src_mask = pd->write_mask;
187 break;
188 }
189
190 unsigned num_child = nir_op_infos[instr->op].num_inputs;
191 node->num_src = num_child;
192
193 for (int i = 0; i < num_child; i++) {
194 nir_alu_src *ns = instr->src + i;
195 ppir_src *ps = node->src + i;
196 memcpy(ps->swizzle, ns->swizzle, sizeof(ps->swizzle));
197 ppir_node_add_src(block->comp, &node->node, ps, &ns->src, src_mask);
198
199 ps->absolute = ns->abs;
200 ps->negate = ns->negate;
201 }
202
203 return &node->node;
204 }
205
206 static ppir_block *ppir_block_create(ppir_compiler *comp);
207
208 static bool ppir_emit_discard_block(ppir_compiler *comp)
209 {
210 ppir_block *block = ppir_block_create(comp);
211 ppir_discard_node *discard;
212 if (!block)
213 return false;
214
215 comp->discard_block = block;
216 block->comp = comp;
217
218 discard = ppir_node_create(block, ppir_op_discard, -1, 0);
219 if (discard)
220 list_addtail(&discard->node.list, &block->node_list);
221 else
222 return false;
223
224 return true;
225 }
226
/* Lower discard_if to a conditional branch into the shared discard
 * block, which is created lazily on first use.
 */
static ppir_node *ppir_emit_discard_if(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   ppir_node *node;
   ppir_compiler *comp = block->comp;
   ppir_branch_node *branch;

   if (!comp->discard_block && !ppir_emit_discard_block(comp))
      return NULL;

   node = ppir_node_create(block, ppir_op_branch, -1, 0);
   if (!node)
      return NULL;
   branch = ppir_node_to_branch(node);

   /* second src and condition will be updated during lowering */
   ppir_node_add_src(block->comp, node, &branch->src[0],
                     &instr->src[0], u_bit_consecutive(0, instr->num_components));
   branch->target = comp->discard_block;

   return node;
}
249
250 static ppir_node *ppir_emit_discard(ppir_block *block, nir_instr *ni)
251 {
252 ppir_node *node = ppir_node_create(block, ppir_op_discard, -1, 0);
253
254 return node;
255 }
256
/* Translate a NIR intrinsic into a ppir node.  Handles varying/uniform
 * and system-value loads, color output stores, and discards; everything
 * else is rejected with an error.
 */
static ppir_node *ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   unsigned mask = 0;
   ppir_load_node *lnode;
   ppir_store_node *snode;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
      /* register dests need an explicit write mask; SSA dests derive
       * theirs from the def in ppir_node_create_ssa */
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_varying, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      /* varying slot is addressed in scalar components: 4 per vec4 slot */
      lnode->index = nir_intrinsic_base(instr) * 4 + nir_intrinsic_component(instr);
      return &lnode->node;

   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      /* map each system value to its dedicated load op */
      ppir_op op;
      switch (instr->intrinsic) {
      case nir_intrinsic_load_frag_coord:
         op = ppir_op_load_fragcoord;
         break;
      case nir_intrinsic_load_point_coord:
         op = ppir_op_load_pointcoord;
         break;
      case nir_intrinsic_load_front_face:
         op = ppir_op_load_frontface;
         break;
      default:
         assert(0);
         break;
      }

      lnode = ppir_node_create_dest(block, op, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      return &lnode->node;

   case nir_intrinsic_load_uniform:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      lnode->index = nir_intrinsic_base(instr);
      /* NOTE(review): treats src[0] as a constant *float* offset and
       * truncates it to an integer — assumes the indirect offset was
       * lowered to a constant earlier; verify against the lowering
       * passes run before this backend */
      lnode->index += (uint32_t)nir_src_as_float(instr->src[0]);

      return &lnode->node;

   case nir_intrinsic_store_output:
      /* store has no NIR dest; the node is created without one */
      snode = ppir_node_create_dest(block, ppir_op_store_color, NULL, 0);
      if (!snode)
         return NULL;

      snode->index = nir_intrinsic_base(instr);

      /* identity swizzle for the stored components */
      for (int i = 0; i < instr->num_components; i++)
         snode->src.swizzle[i] = i;

      ppir_node_add_src(block->comp, &snode->node, &snode->src, instr->src,
                        u_bit_consecutive(0, instr->num_components));

      return &snode->node;

   case nir_intrinsic_discard:
      return ppir_emit_discard(block, ni);

   case nir_intrinsic_discard_if:
      return ppir_emit_discard_if(block, ni);

   default:
      ppir_error("unsupported nir_intrinsic_instr %s\n",
                 nir_intrinsic_infos[instr->intrinsic].name);
      return NULL;
   }
}
347
348 static ppir_node *ppir_emit_load_const(ppir_block *block, nir_instr *ni)
349 {
350 nir_load_const_instr *instr = nir_instr_as_load_const(ni);
351 ppir_const_node *node = ppir_node_create_ssa(block, ppir_op_const, &instr->def);
352 if (!node)
353 return NULL;
354
355 assert(instr->def.bit_size == 32);
356
357 for (int i = 0; i < instr->def.num_components; i++)
358 node->constant.value[i].i = instr->value[i].i32;
359 node->constant.num = instr->def.num_components;
360
361 return &node->node;
362 }
363
364 static ppir_node *ppir_emit_ssa_undef(ppir_block *block, nir_instr *ni)
365 {
366 ppir_error("nir_ssa_undef_instr not support\n");
367 return NULL;
368 }
369
/* Translate a NIR texture instruction into a ppir load_texture node.
 * Only plain nir_texop_tex with 2D/RECT/EXTERNAL samplers and a coord
 * source is supported.
 */
static ppir_node *ppir_emit_tex(ppir_block *block, nir_instr *ni)
{
   nir_tex_instr *instr = nir_instr_as_tex(ni);
   ppir_load_texture_node *node;

   if (instr->op != nir_texop_tex) {
      ppir_error("unsupported texop %d\n", instr->op);
      return NULL;
   }

   node = ppir_node_create_dest(block, ppir_op_load_texture, &instr->dest, 0);
   if (!node)
      return NULL;

   node->sampler = instr->texture_index;

   /* reject sampler dimensions the PP cannot handle.  NOTE(review): the
    * already-created node is abandoned here; it is ralloc'd off the
    * compiler, so presumably freed with it — confirm no list linkage
    * happened yet */
   switch (instr->sampler_dim) {
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      break;
   default:
      ppir_error("unsupported sampler dim: %d\n", instr->sampler_dim);
      return NULL;
   }

   node->sampler_dim = instr->sampler_dim;

   /* identity swizzle for the coordinate components */
   for (int i = 0; i < instr->coord_components; i++)
      node->src_coords.swizzle[i] = i;

   for (int i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord:
         ppir_node_add_src(block->comp, &node->node, &node->src_coords, &instr->src[i].src,
                           u_bit_consecutive(0, instr->coord_components));
         break;
      default:
         ppir_error("unsupported texture source type\n");
         assert(0);
         return NULL;
      }
   }

   return &node->node;
}
416
417 static ppir_node *ppir_emit_jump(ppir_block *block, nir_instr *ni)
418 {
419 ppir_error("nir_jump_instr not support\n");
420 return NULL;
421 }
422
/* Emit handler per NIR instruction type, indexed by nir_instr_type.
 * Phi (and later) types are not handled — ppir_emit_block asserts
 * instr->type < nir_instr_type_phi before dispatching.
 */
static ppir_node *(*ppir_emit_instr[nir_instr_type_phi])(ppir_block *, nir_instr *) = {
   [nir_instr_type_alu]        = ppir_emit_alu,
   [nir_instr_type_intrinsic]  = ppir_emit_intrinsic,
   [nir_instr_type_load_const] = ppir_emit_load_const,
   [nir_instr_type_ssa_undef]  = ppir_emit_ssa_undef,
   [nir_instr_type_tex]        = ppir_emit_tex,
   [nir_instr_type_jump]       = ppir_emit_jump,
};
431
432 static ppir_block *ppir_block_create(ppir_compiler *comp)
433 {
434 ppir_block *block = rzalloc(comp, ppir_block);
435 if (!block)
436 return NULL;
437
438 list_inithead(&block->node_list);
439 list_inithead(&block->instr_list);
440
441 return block;
442 }
443
444 static bool ppir_emit_block(ppir_compiler *comp, nir_block *nblock)
445 {
446 ppir_block *block = ppir_block_create(comp);
447 if (!block)
448 return false;
449
450 list_addtail(&block->list, &comp->block_list);
451 block->comp = comp;
452
453 nir_foreach_instr(instr, nblock) {
454 assert(instr->type < nir_instr_type_phi);
455 ppir_node *node = ppir_emit_instr[instr->type](block, instr);
456 if (!node)
457 return false;
458
459 list_addtail(&node->list, &block->node_list);
460 }
461
462 return true;
463 }
464
465 static bool ppir_emit_if(ppir_compiler *comp, nir_if *nif)
466 {
467 ppir_error("if nir_cf_node not support\n");
468 return false;
469 }
470
471 static bool ppir_emit_loop(ppir_compiler *comp, nir_loop *nloop)
472 {
473 ppir_error("loop nir_cf_node not support\n");
474 return false;
475 }
476
477 static bool ppir_emit_function(ppir_compiler *comp, nir_function_impl *nfunc)
478 {
479 ppir_error("function nir_cf_node not support\n");
480 return false;
481 }
482
483 static bool ppir_emit_cf_list(ppir_compiler *comp, struct exec_list *list)
484 {
485 foreach_list_typed(nir_cf_node, node, node, list) {
486 bool ret;
487
488 switch (node->type) {
489 case nir_cf_node_block:
490 ret = ppir_emit_block(comp, nir_cf_node_as_block(node));
491 break;
492 case nir_cf_node_if:
493 ret = ppir_emit_if(comp, nir_cf_node_as_if(node));
494 break;
495 case nir_cf_node_loop:
496 ret = ppir_emit_loop(comp, nir_cf_node_as_loop(node));
497 break;
498 case nir_cf_node_function:
499 ret = ppir_emit_function(comp, nir_cf_node_as_function(node));
500 break;
501 default:
502 ppir_error("unknown NIR node type %d\n", node->type);
503 return false;
504 }
505
506 if (!ret)
507 return false;
508 }
509
510 return true;
511 }
512
513 static ppir_compiler *ppir_compiler_create(void *prog, unsigned num_reg, unsigned num_ssa)
514 {
515 ppir_compiler *comp = rzalloc_size(
516 prog, sizeof(*comp) + ((num_reg << 2) + num_ssa) * sizeof(ppir_node *));
517 if (!comp)
518 return NULL;
519
520 list_inithead(&comp->block_list);
521 list_inithead(&comp->reg_list);
522
523 comp->var_nodes = (ppir_node **)(comp + 1);
524 comp->reg_base = num_ssa;
525 comp->prog = prog;
526 return comp;
527 }
528
529 static void ppir_add_ordering_deps(ppir_compiler *comp)
530 {
531 /* Some intrinsics do not have explicit dependencies and thus depend
532 * on instructions order. Consider discard_if and store_ouput as
533 * example. If we don't add fake dependency of discard_if to store_output
534 * scheduler may put store_output first and since store_output terminates
535 * shader on Utgard PP, rest of it will never be executed.
536 * Add fake dependencies for discard/branch/store to preserve
537 * instruction order.
538 *
539 * TODO: scheduler should schedule discard_if as early as possible otherwise
540 * we may end up with suboptimal code for cases like this:
541 *
542 * s3 = s1 < s2
543 * discard_if s3
544 * s4 = s1 + s2
545 * store s4
546 *
547 * In this case store depends on discard_if and s4, but since dependencies can
548 * be scheduled in any order it can result in code like this:
549 *
550 * instr1: s3 = s1 < s3
551 * instr2: s4 = s1 + s2
552 * instr3: discard_if s3
553 * instr4: store s4
554 */
555 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
556 ppir_node *prev_node = NULL;
557 list_for_each_entry(ppir_node, node, &block->node_list, list) {
558 if (node->type == ppir_node_type_discard ||
559 node->type == ppir_node_type_store ||
560 node->type == ppir_node_type_branch) {
561 if (prev_node)
562 ppir_node_add_dep(node, prev_node);
563 prev_node = node;
564 }
565 }
566 }
567 }
568
569 static void ppir_print_shader_db(struct nir_shader *nir, ppir_compiler *comp,
570 struct pipe_debug_callback *debug)
571 {
572 const struct shader_info *info = &nir->info;
573 char *shaderdb;
574 int ret = asprintf(&shaderdb,
575 "%s shader: %d inst, %d loops, %d:%d spills:fills\n",
576 gl_shader_stage_name(info->stage),
577 comp->cur_instr_index,
578 comp->num_loops,
579 comp->num_spills,
580 comp->num_fills);
581 assert(ret >= 0);
582
583 if (lima_debug & LIMA_DEBUG_SHADERDB)
584 fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
585
586 pipe_debug_message(debug, SHADER_INFO, "%s", shaderdb);
587 free(shaderdb);
588 }
589
/* Compile a NIR fragment shader to PP machine code: build the ppir IR,
 * then run lowering, node-to-instruction conversion, scheduling,
 * register allocation and codegen.  Returns false on any failure; all
 * intermediate memory is ralloc'd off comp and freed on both paths.
 */
bool ppir_compile_nir(struct lima_fs_shader_state *prog, struct nir_shader *nir,
                      struct ra_regs *ra,
                      struct pipe_debug_callback *debug)
{
   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   ppir_compiler *comp = ppir_compiler_create(prog, func->reg_alloc, func->ssa_alloc);
   if (!comp)
      return false;

   comp->ra = ra;

   /* pre-create a ppir_reg for every NIR register so node creation can
    * look them up (see ppir_node_create_reg).
    * NOTE(review): on rzalloc failure here comp is leaked until prog is
    * freed (ralloc parent), unlike the goto path below.
    */
   foreach_list_typed(nir_register, reg, node, &func->registers) {
      ppir_reg *r = rzalloc(comp, ppir_reg);
      if (!r)
         return false;

      r->index = reg->index;
      r->num_components = reg->num_components;
      r->live_in = INT_MAX;
      r->live_out = 0;
      r->is_head = false;
      list_addtail(&r->list, &comp->reg_list);
   }

   if (!ppir_emit_cf_list(comp, &func->body))
      goto err_out0;

   /* If we have discard block add it to the very end */
   if (comp->discard_block)
      list_addtail(&comp->discard_block->list, &comp->block_list);

   /* fake ordering deps keep discard/branch/store in program order */
   ppir_add_ordering_deps(comp);

   ppir_node_print_prog(comp);

   if (!ppir_lower_prog(comp))
      goto err_out0;

   if (!ppir_node_to_instr(comp))
      goto err_out0;

   if (!ppir_schedule_prog(comp))
      goto err_out0;

   if (!ppir_regalloc_prog(comp))
      goto err_out0;

   if (!ppir_codegen_prog(comp))
      goto err_out0;

   ppir_print_shader_db(nir, comp, debug);

   ralloc_free(comp);
   return true;

err_out0:
   ralloc_free(comp);
   return false;
}
649