/*
 * Copyright (c) 2017 Lima Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <string.h>

#include "util/ralloc.h"
#include "util/bitscan.h"
#include "compiler/nir/nir.h"
#include "pipe/p_state.h"

#include "ppir.h"

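/* Create a ppir node that writes an SSA destination covering all
 * components of the nir_ssa_def. Liveness is initialized to the
 * "not yet seen" state (live_in = INT_MAX, live_out = 0) for the
 * later liveness computation in register allocation.
 */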
static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_ssa_def *ssa)
{
   ppir_node *node = ppir_node_create(block, op, ssa->index, 0);
   if (!node)
      return NULL;

   ppir_dest *dest = ppir_node_get_dest(node);
   dest->type = ppir_target_ssa;
   dest->ssa.num_components = ssa->num_components;
   dest->ssa.live_in = INT_MAX;
   dest->ssa.live_out = 0;
   dest->write_mask = u_bit_consecutive(0, ssa->num_components);

   if (node->type == ppir_node_type_load ||
       node->type == ppir_node_type_store)
      dest->ssa.is_head = true;

   return node;
}

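/* Like ppir_node_create_ssa, but for a NIR register destination: look
 * up the ppir_reg created for this nir_register in the compiler's
 * reg_list and write through it with the given component mask.
 */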
static void *ppir_node_create_reg(ppir_block *block, ppir_op op,
                                  nir_reg_dest *reg, unsigned mask)
{
   ppir_node *node = ppir_node_create(block, op, reg->reg->index, mask);
   if (!node)
      return NULL;

   ppir_dest *dest = ppir_node_get_dest(node);

   list_for_each_entry(ppir_reg, r, &block->comp->reg_list, list) {
      if (r->index == reg->reg->index) {
         dest->reg = r;
         break;
      }
   }

   dest->type = ppir_target_register;
   dest->write_mask = mask;

   if (node->type == ppir_node_type_load ||
       node->type == ppir_node_type_store)
      dest->reg->is_head = true;

   return node;
}

static void *ppir_node_create_dest(ppir_block *block, ppir_op op,
                                   nir_dest *dest, unsigned mask)
{
   unsigned index = -1;

   if (dest) {
      if (dest->is_ssa)
         return ppir_node_create_ssa(block, op, &dest->ssa);
      else
         return ppir_node_create_reg(block, op, &dest->reg, mask);
   }

   return ppir_node_create(block, op, index, 0);
}

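/* Hook up one source of a node: add dependency edges to the node(s)
 * producing the source value (one node per register component for
 * non-SSA sources) and point the ppir_src at the producer's dest.
 */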
static void ppir_node_add_src(ppir_compiler *comp, ppir_node *node,
                              ppir_src *ps, nir_src *ns, unsigned mask)
{
   ppir_node *child = NULL;

   if (ns->is_ssa) {
      child = comp->var_nodes[ns->ssa->index];
      ppir_node_add_dep(node, child);
   }
   else {
      nir_register *reg = ns->reg.reg;
      while (mask) {
         int swizzle = ps->swizzle[u_bit_scan(&mask)];
         child = comp->var_nodes[(reg->index << 2) + comp->reg_base + swizzle];
         ppir_node_add_dep(node, child);
      }
   }

   ppir_dest *dest = ppir_node_get_dest(child);
   ppir_node_target_assign(ps, dest);
}

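/* Map NIR ALU opcodes to ppir opcodes; entries left at -1 are not
 * supported and are rejected in ppir_emit_alu. Note that several NIR
 * comparison opcodes (e.g. slt/flt, seq/feq) share one ppir opcode.
 */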
static int nir_to_ppir_opcodes[nir_num_opcodes] = {
   /* not supported */
   [0 ... nir_last_opcode] = -1,

   [nir_op_mov] = ppir_op_mov,
   [nir_op_fmul] = ppir_op_mul,
   [nir_op_fabs] = ppir_op_abs,
   [nir_op_fneg] = ppir_op_neg,
   [nir_op_fadd] = ppir_op_add,
   [nir_op_fsum3] = ppir_op_sum3,
   [nir_op_fsum4] = ppir_op_sum4,
   [nir_op_frsq] = ppir_op_rsqrt,
   [nir_op_flog2] = ppir_op_log2,
   [nir_op_fexp2] = ppir_op_exp2,
   [nir_op_fsqrt] = ppir_op_sqrt,
   [nir_op_fsin] = ppir_op_sin,
   [nir_op_fcos] = ppir_op_cos,
   [nir_op_fmax] = ppir_op_max,
   [nir_op_fmin] = ppir_op_min,
   [nir_op_frcp] = ppir_op_rcp,
   [nir_op_ffloor] = ppir_op_floor,
   [nir_op_fceil] = ppir_op_ceil,
   [nir_op_ffract] = ppir_op_fract,
   [nir_op_sge] = ppir_op_ge,
   [nir_op_fge] = ppir_op_ge,
   [nir_op_slt] = ppir_op_lt,
   [nir_op_flt] = ppir_op_lt,
   [nir_op_seq] = ppir_op_eq,
   [nir_op_feq] = ppir_op_eq,
   [nir_op_sne] = ppir_op_ne,
   [nir_op_fne] = ppir_op_ne,
   [nir_op_fcsel] = ppir_op_select,
   [nir_op_inot] = ppir_op_not,
   [nir_op_ftrunc] = ppir_op_trunc,
   [nir_op_fsat] = ppir_op_sat,
   [nir_op_fddx] = ppir_op_ddx,
   [nir_op_fddy] = ppir_op_ddy,
};

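/* Translate one nir_alu_instr into a ppir ALU node, carrying over the
 * write mask, saturate modifier, swizzles and abs/neg source modifiers.
 */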
static ppir_node *ppir_emit_alu(ppir_block *block, nir_instr *ni)
{
   nir_alu_instr *instr = nir_instr_as_alu(ni);
   int op = nir_to_ppir_opcodes[instr->op];

   if (op < 0) {
      ppir_error("unsupported nir_op: %s\n", nir_op_infos[instr->op].name);
      return NULL;
   }

   ppir_alu_node *node = ppir_node_create_dest(block, op, &instr->dest.dest,
                                               instr->dest.write_mask);
   if (!node)
      return NULL;

   ppir_dest *pd = &node->dest;
   nir_alu_dest *nd = &instr->dest;
   if (nd->saturate)
      pd->modifier = ppir_outmod_clamp_fraction;

   unsigned src_mask;
   switch (op) {
   /* sum3/sum4 read more source channels than they write */
   case ppir_op_sum3:
      src_mask = 0b0111;
      break;
   case ppir_op_sum4:
      src_mask = 0b1111;
      break;
   default:
      src_mask = pd->write_mask;
      break;
   }

   unsigned num_child = nir_op_infos[instr->op].num_inputs;
   node->num_src = num_child;

   for (int i = 0; i < num_child; i++) {
      nir_alu_src *ns = instr->src + i;
      ppir_src *ps = node->src + i;
      memcpy(ps->swizzle, ns->swizzle, sizeof(ps->swizzle));
      ppir_node_add_src(block->comp, &node->node, ps, &ns->src, src_mask);

      ps->absolute = ns->abs;
      ps->negate = ns->negate;
   }

   return &node->node;
}

static ppir_block *ppir_block_create(ppir_compiler *comp);

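/* Lazily create the single block holding the discard node; discard_if
 * branches from anywhere in the shader jump to this shared block,
 * which is appended to the very end of the block list.
 */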
static bool ppir_emit_discard_block(ppir_compiler *comp)
{
   ppir_block *block = ppir_block_create(comp);
   ppir_discard_node *discard;
   if (!block)
      return false;

   comp->discard_block = block;
   block->comp = comp;

   discard = ppir_node_create(block, ppir_op_discard, -1, 0);
   if (!discard)
      return false;

   list_addtail(&discard->node.list, &block->node_list);

   return true;
}

static ppir_node *ppir_emit_discard_if(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   ppir_node *node;
   ppir_compiler *comp = block->comp;
   ppir_branch_node *branch;

   if (!comp->discard_block && !ppir_emit_discard_block(comp))
      return NULL;

   node = ppir_node_create(block, ppir_op_branch, -1, 0);
   if (!node)
      return NULL;

   branch = ppir_node_to_branch(node);

   /* second src and condition will be updated during lowering */
   ppir_node_add_src(block->comp, node, &branch->src[0],
                     &instr->src[0], u_bit_consecutive(0, instr->num_components));
   branch->target = comp->discard_block;

   return node;
}

static ppir_node *ppir_emit_discard(ppir_block *block, nir_instr *ni)
{
   ppir_node *node = ppir_node_create(block, ppir_op_discard, -1, 0);

   return node;
}

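/* Translate the intrinsics handled by this fragment-shader backend:
 * varying loads, sysval loads (fragcoord/pointcoord/frontface),
 * uniform loads, color output stores, and discards.
 */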
static ppir_node *ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
{
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(ni);
   unsigned mask = 0;
   ppir_load_node *lnode;
   ppir_store_node *snode;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_varying, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      lnode->index = nir_intrinsic_base(instr) * 4 + nir_intrinsic_component(instr);

      return &lnode->node;

   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      ppir_op op;
      switch (instr->intrinsic) {
      case nir_intrinsic_load_frag_coord:
         op = ppir_op_load_fragcoord;
         break;
      case nir_intrinsic_load_point_coord:
         op = ppir_op_load_pointcoord;
         break;
      case nir_intrinsic_load_front_face:
         op = ppir_op_load_frontface;
         break;
      default:
         unreachable("unhandled sysval intrinsic");
      }

      lnode = ppir_node_create_dest(block, op, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;

      return &lnode->node;

   case nir_intrinsic_load_uniform:
      if (!instr->dest.is_ssa)
         mask = u_bit_consecutive(0, instr->num_components);

      lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &instr->dest, mask);
      if (!lnode)
         return NULL;

      lnode->num_components = instr->num_components;
      lnode->index = nir_intrinsic_base(instr);
      lnode->index += (uint32_t)nir_src_as_float(instr->src[0]);

      return &lnode->node;

   case nir_intrinsic_store_output:
      snode = ppir_node_create_dest(block, ppir_op_store_color, NULL, 0);
      if (!snode)
         return NULL;

      snode->index = nir_intrinsic_base(instr);

      for (int i = 0; i < instr->num_components; i++)
         snode->src.swizzle[i] = i;

      ppir_node_add_src(block->comp, &snode->node, &snode->src, instr->src,
                        u_bit_consecutive(0, instr->num_components));

      return &snode->node;

   case nir_intrinsic_discard:
      return ppir_emit_discard(block, ni);

   case nir_intrinsic_discard_if:
      return ppir_emit_discard_if(block, ni);

   default:
      ppir_error("unsupported nir_intrinsic_instr %s\n",
                 nir_intrinsic_infos[instr->intrinsic].name);
      return NULL;
   }
}

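/* A NIR constant becomes a ppir const node; only 32-bit components are
 * expected here (see the assert below).
 */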
static ppir_node *ppir_emit_load_const(ppir_block *block, nir_instr *ni)
{
   nir_load_const_instr *instr = nir_instr_as_load_const(ni);
   ppir_const_node *node = ppir_node_create_ssa(block, ppir_op_const, &instr->def);
   if (!node)
      return NULL;

   assert(instr->def.bit_size == 32);

   for (int i = 0; i < instr->def.num_components; i++)
      node->constant.value[i].i = instr->value[i].i32;
   node->constant.num = instr->def.num_components;

   return &node->node;
}

static ppir_node *ppir_emit_ssa_undef(ppir_block *block, nir_instr *ni)
{
   ppir_error("nir_ssa_undef_instr is not supported\n");
   return NULL;
}

static ppir_node *ppir_emit_tex(ppir_block *block, nir_instr *ni)
{
   nir_tex_instr *instr = nir_instr_as_tex(ni);
   ppir_load_texture_node *node;

   if (instr->op != nir_texop_tex) {
      ppir_error("unsupported texop %d\n", instr->op);
      return NULL;
   }

   node = ppir_node_create_dest(block, ppir_op_load_texture, &instr->dest, 0);
   if (!node)
      return NULL;

   node->sampler = instr->texture_index;

   switch (instr->sampler_dim) {
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      break;
   default:
      ppir_error("unsupported sampler dim: %d\n", instr->sampler_dim);
      return NULL;
   }

   node->sampler_dim = instr->sampler_dim;

   for (int i = 0; i < instr->coord_components; i++)
      node->src_coords.swizzle[i] = i;

   for (int i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord:
         ppir_node_add_src(block->comp, &node->node, &node->src_coords, &instr->src[i].src,
                           u_bit_consecutive(0, instr->coord_components));
         break;
      default:
         ppir_error("unsupported texture source type\n");
         return NULL;
      }
   }

   return &node->node;
}

static ppir_node *ppir_emit_jump(ppir_block *block, nir_instr *ni)
{
   ppir_error("nir_jump_instr is not supported\n");
   return NULL;
}

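/* Per-instruction-type emit handlers, indexed by nir_instr_type.
 * Phis are excluded: the NIR fed to this backend must be phi-free
 * (see the assert in ppir_emit_block).
 */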
static ppir_node *(*ppir_emit_instr[nir_instr_type_phi])(ppir_block *, nir_instr *) = {
   [nir_instr_type_alu] = ppir_emit_alu,
   [nir_instr_type_intrinsic] = ppir_emit_intrinsic,
   [nir_instr_type_load_const] = ppir_emit_load_const,
   [nir_instr_type_ssa_undef] = ppir_emit_ssa_undef,
   [nir_instr_type_tex] = ppir_emit_tex,
   [nir_instr_type_jump] = ppir_emit_jump,
};

static ppir_block *ppir_block_create(ppir_compiler *comp)
{
   ppir_block *block = rzalloc(comp, ppir_block);
   if (!block)
      return NULL;

   list_inithead(&block->node_list);
   list_inithead(&block->instr_list);

   return block;
}

static bool ppir_emit_block(ppir_compiler *comp, nir_block *nblock)
{
   ppir_block *block = ppir_block_create(comp);
   if (!block)
      return false;

   list_addtail(&block->list, &comp->block_list);
   block->comp = comp;

   nir_foreach_instr(instr, nblock) {
      assert(instr->type < nir_instr_type_phi);
      ppir_node *node = ppir_emit_instr[instr->type](block, instr);
      if (!node)
         return false;

      list_addtail(&node->list, &block->node_list);
   }

   return true;
}

static bool ppir_emit_if(ppir_compiler *comp, nir_if *nif)
{
   ppir_error("if nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_loop(ppir_compiler *comp, nir_loop *nloop)
{
   ppir_error("loop nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_function(ppir_compiler *comp, nir_function_impl *nfunc)
{
   ppir_error("function nir_cf_node is not supported\n");
   return false;
}

static bool ppir_emit_cf_list(ppir_compiler *comp, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      bool ret;

      switch (node->type) {
      case nir_cf_node_block:
         ret = ppir_emit_block(comp, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         ret = ppir_emit_if(comp, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         ret = ppir_emit_loop(comp, nir_cf_node_as_loop(node));
         break;
      case nir_cf_node_function:
         ret = ppir_emit_function(comp, nir_cf_node_as_function(node));
         break;
      default:
         ppir_error("unknown NIR node type %d\n", node->type);
         return false;
      }

      if (!ret)
         return false;
   }

   return true;
}

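/* The var_nodes array trailing the compiler struct maps NIR values to
 * their producer nodes: SSA defs occupy the first num_ssa slots, and
 * each NIR register gets four consecutive slots (one per component)
 * starting at reg_base.
 */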
static ppir_compiler *ppir_compiler_create(void *prog, unsigned num_reg, unsigned num_ssa)
{
   ppir_compiler *comp = rzalloc_size(
      prog, sizeof(*comp) + ((num_reg << 2) + num_ssa) * sizeof(ppir_node *));
   if (!comp)
      return NULL;

   list_inithead(&comp->block_list);
   list_inithead(&comp->reg_list);

   comp->var_nodes = (ppir_node **)(comp + 1);
   comp->reg_base = num_ssa;
   comp->prog = prog;
   return comp;
}

static void ppir_add_ordering_deps(ppir_compiler *comp)
{
   /* Some intrinsics do not have explicit dependencies and thus depend
    * on instruction order. Consider discard_if and store_output as an
    * example. If we don't make store_output depend on discard_if, the
    * scheduler may put store_output first, and since store_output
    * terminates the shader on Utgard PP, the rest of it will never be
    * executed. Add fake dependencies for discard/branch/store to
    * preserve the ordering.
    *
    * TODO: the scheduler should schedule discard_if as early as possible,
    * otherwise we may end up with suboptimal code for cases like this:
    *
    * s3 = s1 < s2
    * discard_if s3
    * s4 = s1 + s2
    * store s4
    *
    * In this case store depends on discard_if and s4, but since
    * dependencies can be scheduled in any order it can result in code
    * like this:
    *
    * instr1: s3 = s1 < s2
    * instr2: s4 = s1 + s2
    * instr3: discard_if s3
    * instr4: store s4
    */
   list_for_each_entry(ppir_block, block, &comp->block_list, list) {
      ppir_node *prev_node = NULL;
      list_for_each_entry(ppir_node, node, &block->node_list, list) {
         if (node->type == ppir_node_type_discard ||
             node->type == ppir_node_type_store ||
             node->type == ppir_node_type_branch) {
            if (prev_node)
               ppir_node_add_dep(node, prev_node);
            prev_node = node;
         }
      }
   }
}

static void ppir_print_shader_db(struct nir_shader *nir, ppir_compiler *comp,
                                 struct pipe_debug_callback *debug)
{
   const struct shader_info *info = &nir->info;
   char *shaderdb;
   /* the remaining counters follow the format string: loops, spills, fills */
   int ret = asprintf(&shaderdb,
                      "%s shader: %d inst, %d loops, %d:%d spills:fills\n",
                      gl_shader_stage_name(info->stage),
                      comp->cur_instr_index,
                      comp->num_loops,
                      comp->num_spills,
                      comp->num_fills);
   assert(ret >= 0);

   if (lima_debug & LIMA_DEBUG_SHADERDB)
      fprintf(stderr, "SHADER-DB: %s\n", shaderdb);

   pipe_debug_message(debug, SHADER_INFO, "%s", shaderdb);
   free(shaderdb);
}

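/* Top-level entry point: build the ppir graph from the NIR entrypoint,
 * then run the backend pipeline (lowering, node-to-instruction pairing,
 * scheduling, register allocation, code generation).
 */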
bool ppir_compile_nir(struct lima_fs_shader_state *prog, struct nir_shader *nir,
                      struct ra_regs *ra,
                      struct pipe_debug_callback *debug)
{
   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   ppir_compiler *comp = ppir_compiler_create(prog, func->reg_alloc, func->ssa_alloc);
   if (!comp)
      return false;

   comp->ra = ra;

   foreach_list_typed(nir_register, reg, node, &func->registers) {
      ppir_reg *r = rzalloc(comp, ppir_reg);
      if (!r)
         return false;

      r->index = reg->index;
      r->num_components = reg->num_components;
      r->live_in = INT_MAX;
      r->live_out = 0;
      list_addtail(&r->list, &comp->reg_list);
   }

   if (!ppir_emit_cf_list(comp, &func->body))
      goto err_out0;

   /* If we have a discard block, add it to the very end */
   if (comp->discard_block)
      list_addtail(&comp->discard_block->list, &comp->block_list);

   ppir_add_ordering_deps(comp);

   ppir_node_print_prog(comp);

   if (!ppir_lower_prog(comp))
      goto err_out0;

   if (!ppir_node_to_instr(comp))
      goto err_out0;

   if (!ppir_schedule_prog(comp))
      goto err_out0;

   if (!ppir_regalloc_prog(comp))
      goto err_out0;

   if (!ppir_codegen_prog(comp))
      goto err_out0;

   ppir_print_shader_db(nir, comp, debug);

   ralloc_free(comp);
   return true;

err_out0:
   ralloc_free(comp);
   return false;
}