/*
 * Copyright (c) 2017 Lima Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */
25 #include "util/bitscan.h"
26 #include "util/ralloc.h"
30 static bool ppir_lower_const(ppir_block
*block
, ppir_node
*node
)
32 if (ppir_node_is_root(node
)) {
33 ppir_node_delete(node
);
37 assert(ppir_node_has_single_succ(node
));
39 ppir_node
*succ
= ppir_node_first_succ(node
);
40 ppir_dest
*dest
= ppir_node_get_dest(node
);
43 case ppir_node_type_alu
:
44 case ppir_node_type_branch
:
45 /* ALU and branch can consume consts directly */
46 dest
->type
= ppir_target_pipeline
;
47 /* Reg will be updated in node_to_instr later */
48 dest
->pipeline
= ppir_pipeline_reg_const0
;
50 /* single succ can still have multiple references to this node */
51 for (int i
= 0; i
< ppir_node_get_src_num(succ
); i
++) {
52 ppir_src
*src
= ppir_node_get_src(succ
, i
);
53 if (src
&& src
->node
== node
) {
54 src
->type
= ppir_target_pipeline
;
55 src
->pipeline
= ppir_pipeline_reg_const0
;
60 /* Create a move for everyone else */
64 ppir_node
*move
= ppir_node_insert_mov(node
);
68 ppir_debug("lower const create move %d for %d\n",
69 move
->index
, node
->index
);
71 /* Need to be careful with changing src/dst type here:
72 * it has to be done *after* successors have their children
73 * replaced, otherwise ppir_node_replace_child() won't find
74 * matching src/dst and as result won't work
76 ppir_src
*mov_src
= ppir_node_get_src(move
, 0);
77 mov_src
->type
= dest
->type
= ppir_target_pipeline
;
78 mov_src
->pipeline
= dest
->pipeline
= ppir_pipeline_reg_const0
;
83 static bool ppir_lower_swap_args(ppir_block
*block
, ppir_node
*node
)
85 /* swapped op must be the next op */
88 assert(node
->type
== ppir_node_type_alu
);
89 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
90 assert(alu
->num_src
== 2);
92 ppir_src tmp
= alu
->src
[0];
93 alu
->src
[0] = alu
->src
[1];
98 static bool ppir_lower_load(ppir_block
*block
, ppir_node
*node
)
100 ppir_dest
*dest
= ppir_node_get_dest(node
);
101 if (ppir_node_is_root(node
) && dest
->type
== ppir_target_ssa
) {
102 ppir_node_delete(node
);
106 /* load can have multiple successors in case if we duplicated load node
107 * that has load node in source
109 if ((ppir_node_has_single_src_succ(node
) || ppir_node_is_root(node
)) &&
110 dest
->type
!= ppir_target_register
) {
111 ppir_node
*succ
= ppir_node_first_succ(node
);
112 switch (succ
->type
) {
113 case ppir_node_type_alu
:
114 case ppir_node_type_branch
: {
115 /* single succ can still have multiple references to this node */
116 for (int i
= 0; i
< ppir_node_get_src_num(succ
); i
++) {
117 ppir_src
*src
= ppir_node_get_src(succ
, i
);
118 if (src
&& src
->node
== node
) {
119 /* Can consume uniforms directly */
120 src
->type
= dest
->type
= ppir_target_pipeline
;
121 src
->pipeline
= dest
->pipeline
= ppir_pipeline_reg_uniform
;
127 /* Create mov for everyone else */
132 ppir_node
*move
= ppir_node_insert_mov(node
);
136 ppir_src
*mov_src
= ppir_node_get_src(move
, 0);
137 mov_src
->type
= dest
->type
= ppir_target_pipeline
;
138 mov_src
->pipeline
= dest
->pipeline
= ppir_pipeline_reg_uniform
;
143 static bool ppir_lower_ddxy(ppir_block
*block
, ppir_node
*node
)
145 assert(node
->type
== ppir_node_type_alu
);
146 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
148 alu
->src
[1] = alu
->src
[0];
149 if (node
->op
== ppir_op_ddx
)
150 alu
->src
[1].negate
= !alu
->src
[1].negate
;
151 else if (node
->op
== ppir_op_ddy
)
152 alu
->src
[0].negate
= !alu
->src
[0].negate
;
161 static bool ppir_lower_texture(ppir_block
*block
, ppir_node
*node
)
163 ppir_load_texture_node
*load_tex
= ppir_node_to_load_texture(node
);
164 ppir_dest
*dest
= ppir_node_get_dest(node
);
165 ppir_node
*src_coords
= ppir_node_get_src(node
, 0)->node
;
166 ppir_load_node
*load
= NULL
;
168 if (src_coords
&& ppir_node_has_single_src_succ(src_coords
) &&
169 (src_coords
->op
== ppir_op_load_coords
))
170 load
= ppir_node_to_load(src_coords
);
172 /* Create load_coords node */
173 load
= ppir_node_create(block
, ppir_op_load_coords_reg
, -1, 0);
176 list_addtail(&load
->node
.list
, &node
->list
);
178 load
->src
= load_tex
->src
[0];
180 if (load_tex
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
)
181 load
->num_components
= 3;
183 load
->num_components
= 2;
185 ppir_debug("%s create load_coords node %d for %d\n",
186 __FUNCTION__
, load
->node
.index
, node
->index
);
188 ppir_node_foreach_pred_safe(node
, dep
) {
189 ppir_node
*pred
= dep
->pred
;
190 ppir_node_remove_dep(dep
);
191 ppir_node_add_dep(&load
->node
, pred
, ppir_dep_src
);
193 ppir_node_add_dep(node
, &load
->node
, ppir_dep_src
);
197 load_tex
->src
[0].type
= load
->dest
.type
= ppir_target_pipeline
;
198 load_tex
->src
[0].pipeline
= load
->dest
.pipeline
= ppir_pipeline_reg_discard
;
200 /* Always create move node since there can be successors in other blocks */
201 ppir_node
*move
= ppir_node_insert_mov_all_blocks(node
);
205 ppir_debug("lower texture create move %d for %d\n",
206 move
->index
, node
->index
);
208 ppir_src
*mov_src
= ppir_node_get_src(move
, 0);
209 mov_src
->type
= dest
->type
= ppir_target_pipeline
;
210 mov_src
->pipeline
= dest
->pipeline
= ppir_pipeline_reg_sampler
;
215 /* insert a move as the select condition to make sure it can
216 * be inserted to select instr float mul slot
218 static bool ppir_lower_select(ppir_block
*block
, ppir_node
*node
)
220 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
222 ppir_node
*move
= ppir_node_create(block
, ppir_op_sel_cond
, -1, 0);
225 list_addtail(&move
->list
, &node
->list
);
227 ppir_alu_node
*move_alu
= ppir_node_to_alu(move
);
228 ppir_src
*move_src
= move_alu
->src
, *src
= alu
->src
;
229 move_src
->type
= src
->type
;
230 move_src
->ssa
= src
->ssa
;
231 move_src
->swizzle
[0] = src
->swizzle
[0];
232 move_alu
->num_src
= 1;
234 ppir_dest
*move_dest
= &move_alu
->dest
;
235 move_dest
->type
= ppir_target_pipeline
;
236 move_dest
->pipeline
= ppir_pipeline_reg_fmul
;
237 move_dest
->write_mask
= 1;
239 ppir_node
*pred
= alu
->src
[0].node
;
240 ppir_dep
*dep
= ppir_dep_for_pred(node
, pred
);
242 ppir_node_replace_pred(dep
, move
);
244 ppir_node_add_dep(node
, move
, ppir_dep_src
);
246 /* pred can be a register */
248 ppir_node_add_dep(move
, pred
, ppir_dep_src
);
251 ppir_node_target_assign(alu
->src
, move
);
256 static bool ppir_lower_trunc(ppir_block
*block
, ppir_node
*node
)
258 /* Turn it into a mov with a round to integer output modifier */
259 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
260 ppir_dest
*move_dest
= &alu
->dest
;
261 move_dest
->modifier
= ppir_outmod_round
;
262 node
->op
= ppir_op_mov
;
267 static bool ppir_lower_abs(ppir_block
*block
, ppir_node
*node
)
269 /* Turn it into a mov and set the absolute modifier */
270 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
272 assert(alu
->num_src
== 1);
274 alu
->src
[0].absolute
= true;
275 alu
->src
[0].negate
= false;
276 node
->op
= ppir_op_mov
;
281 static bool ppir_lower_neg(ppir_block
*block
, ppir_node
*node
)
283 /* Turn it into a mov and set the negate modifier */
284 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
286 assert(alu
->num_src
== 1);
288 alu
->src
[0].negate
= !alu
->src
[0].negate
;
289 node
->op
= ppir_op_mov
;
294 static bool ppir_lower_sat(ppir_block
*block
, ppir_node
*node
)
296 /* Turn it into a mov with the saturate output modifier */
297 ppir_alu_node
*alu
= ppir_node_to_alu(node
);
299 assert(alu
->num_src
== 1);
301 ppir_dest
*move_dest
= &alu
->dest
;
302 move_dest
->modifier
= ppir_outmod_clamp_fraction
;
303 node
->op
= ppir_op_mov
;
308 static bool ppir_lower_branch(ppir_block
*block
, ppir_node
*node
)
310 ppir_branch_node
*branch
= ppir_node_to_branch(node
);
312 /* Unconditional branch */
313 if (branch
->num_src
== 0)
316 ppir_const_node
*zero
= ppir_node_create(block
, ppir_op_const
, -1, 0);
321 zero
->constant
.value
[0].f
= 0;
322 zero
->constant
.num
= 1;
323 zero
->dest
.type
= ppir_target_pipeline
;
324 zero
->dest
.pipeline
= ppir_pipeline_reg_const0
;
325 zero
->dest
.ssa
.num_components
= 1;
326 zero
->dest
.write_mask
= 0x01;
328 /* For now we're just comparing branch condition with 0,
329 * in future we should look whether it's possible to move
330 * comparision node into branch itself and use current
331 * way as a fallback for complex conditions.
333 ppir_node_target_assign(&branch
->src
[1], &zero
->node
);
336 branch
->cond_eq
= true;
338 branch
->cond_gt
= true;
339 branch
->cond_lt
= true;
344 ppir_node_add_dep(&branch
->node
, &zero
->node
, ppir_dep_src
);
345 list_addtail(&zero
->node
.list
, &node
->list
);
350 static bool (*ppir_lower_funcs
[ppir_op_num
])(ppir_block
*, ppir_node
*) = {
351 [ppir_op_abs
] = ppir_lower_abs
,
352 [ppir_op_neg
] = ppir_lower_neg
,
353 [ppir_op_const
] = ppir_lower_const
,
354 [ppir_op_ddx
] = ppir_lower_ddxy
,
355 [ppir_op_ddy
] = ppir_lower_ddxy
,
356 [ppir_op_lt
] = ppir_lower_swap_args
,
357 [ppir_op_le
] = ppir_lower_swap_args
,
358 [ppir_op_load_texture
] = ppir_lower_texture
,
359 [ppir_op_select
] = ppir_lower_select
,
360 [ppir_op_trunc
] = ppir_lower_trunc
,
361 [ppir_op_sat
] = ppir_lower_sat
,
362 [ppir_op_branch
] = ppir_lower_branch
,
363 [ppir_op_load_uniform
] = ppir_lower_load
,
364 [ppir_op_load_temp
] = ppir_lower_load
,
367 bool ppir_lower_prog(ppir_compiler
*comp
)
369 list_for_each_entry(ppir_block
, block
, &comp
->block_list
, list
) {
370 list_for_each_entry_safe(ppir_node
, node
, &block
->node_list
, list
) {
371 if (ppir_lower_funcs
[node
->op
] &&
372 !ppir_lower_funcs
[node
->op
](block
, node
))