/*
 * Copyright (c) 2020 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 */
#ifndef H_ETNAVIV_COMPILER_NIR
#define H_ETNAVIV_COMPILER_NIR

#include "compiler/nir/nir.h"
#include "etnaviv_asm.h"
#include "etnaviv_compiler.h"
#include "util/register_allocate.h"
37 nir_function_impl
*impl
;
38 #define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
39 const struct etna_specs
*specs
;
40 struct etna_shader_variant
*variant
;
42 /* block # to instr index */
46 int inst_ptr
; /* current instruction pointer */
47 struct etna_inst code
[ETNA_MAX_INSTRUCTIONS
* ETNA_INST_SIZE
];
50 uint64_t consts
[ETNA_MAX_IMM
];
59 /* There was an error during compilation */
/* Report a fatal compiler error: print the message, flag the context so the
 * caller can bail out, and abort in debug builds. Multi-statement body uses a
 * GCC statement expression so it can appear in expression position.
 * NOTE(review): macro continuation lines and the pass-flag enum below were
 * lost in extraction; restored from the upstream etnaviv source — verify.
 */
#define compile_error(ctx, args...) ({ \
   printf(args); \
   ctx->error = true; \
   assert(0); \
})

/* instr->pass_flags bits used by real_dest() to mark values whose mov is
 * folded away (bypassed) on the source or destination side
 */
enum {
   BYPASS_DST = 1,
   BYPASS_SRC = 2,
};
74 static inline bool is_sysval(nir_instr
*instr
)
76 if (instr
->type
!= nir_instr_type_intrinsic
)
79 nir_intrinsic_instr
*intr
= nir_instr_as_intrinsic(instr
);
80 return intr
->intrinsic
== nir_intrinsic_load_front_face
||
81 intr
->intrinsic
== nir_intrinsic_load_frag_coord
;
84 /* get unique ssa/reg index for nir_src */
85 static inline unsigned
86 src_index(nir_function_impl
*impl
, nir_src
*src
)
88 return src
->is_ssa
? src
->ssa
->index
: (src
->reg
.reg
->index
+ impl
->ssa_alloc
);
91 /* get unique ssa/reg index for nir_dest */
92 static inline unsigned
93 dest_index(nir_function_impl
*impl
, nir_dest
*dest
)
95 return dest
->is_ssa
? dest
->ssa
.index
: (dest
->reg
.reg
->index
+ impl
->ssa_alloc
);
99 update_swiz_mask(nir_alu_instr
*alu
, nir_dest
*dest
, unsigned *swiz
, unsigned *mask
)
104 bool is_vec
= dest
!= NULL
;
105 unsigned swizzle
= 0, write_mask
= 0;
106 for (unsigned i
= 0; i
< 4; i
++) {
107 /* channel not written */
108 if (!(alu
->dest
.write_mask
& (1 << i
)))
110 /* src is different (only check for vecN) */
111 if (is_vec
&& alu
->src
[i
].src
.ssa
!= &dest
->ssa
)
114 unsigned src_swiz
= is_vec
? alu
->src
[i
].swizzle
[0] : alu
->src
[0].swizzle
[i
];
115 swizzle
|= (*swiz
>> src_swiz
* 2 & 3) << i
* 2;
116 /* this channel isn't written through this chain */
117 if (*mask
& (1 << src_swiz
))
118 write_mask
|= 1 << i
;
125 real_dest(nir_dest
*dest
, unsigned *swiz
, unsigned *mask
)
127 if (!dest
|| !dest
->is_ssa
)
130 bool can_bypass_src
= !list_length(&dest
->ssa
.if_uses
);
131 nir_instr
*p_instr
= dest
->ssa
.parent_instr
;
133 /* if used by a vecN, the "real" destination becomes the vecN destination
134 * lower_alu guarantees that values used by a vecN are only used by that vecN
135 * we can apply the same logic to movs in a some cases too
137 nir_foreach_use(use_src
, &dest
->ssa
) {
138 nir_instr
*instr
= use_src
->parent_instr
;
140 /* src bypass check: for now only deal with tex src mov case
141 * note: for alu don't bypass mov for multiple uniform sources
143 switch (instr
->type
) {
144 case nir_instr_type_tex
:
145 if (p_instr
->type
== nir_instr_type_alu
&&
146 nir_instr_as_alu(p_instr
)->op
== nir_op_mov
) {
150 can_bypass_src
= false;
154 if (instr
->type
!= nir_instr_type_alu
)
157 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
163 assert(list_length(&dest
->ssa
.if_uses
) == 0);
164 nir_foreach_use(use_src
, &dest
->ssa
)
165 assert(use_src
->parent_instr
== instr
);
167 update_swiz_mask(alu
, dest
, swiz
, mask
);
170 switch (dest
->ssa
.parent_instr
->type
) {
171 case nir_instr_type_alu
:
172 case nir_instr_type_tex
:
177 if (list_length(&dest
->ssa
.if_uses
) || list_length(&dest
->ssa
.uses
) > 1)
180 update_swiz_mask(alu
, NULL
, swiz
, mask
);
187 assert(!(instr
->pass_flags
& BYPASS_SRC
));
188 instr
->pass_flags
|= BYPASS_DST
;
189 return real_dest(&alu
->dest
.dest
, swiz
, mask
);
192 if (can_bypass_src
&& !(p_instr
->pass_flags
& BYPASS_DST
)) {
193 p_instr
->pass_flags
|= BYPASS_SRC
;
200 /* if instruction dest needs a register, return nir_dest for it */
201 static inline nir_dest
*
202 dest_for_instr(nir_instr
*instr
)
204 nir_dest
*dest
= NULL
;
206 switch (instr
->type
) {
207 case nir_instr_type_alu
:
208 dest
= &nir_instr_as_alu(instr
)->dest
.dest
;
210 case nir_instr_type_tex
:
211 dest
= &nir_instr_as_tex(instr
)->dest
;
213 case nir_instr_type_intrinsic
: {
214 nir_intrinsic_instr
*intr
= nir_instr_as_intrinsic(instr
);
215 if (intr
->intrinsic
== nir_intrinsic_load_uniform
||
216 intr
->intrinsic
== nir_intrinsic_load_ubo
||
217 intr
->intrinsic
== nir_intrinsic_load_input
||
218 intr
->intrinsic
== nir_intrinsic_load_instance_id
)
221 case nir_instr_type_deref
:
226 return real_dest(dest
, NULL
, NULL
);
231 nir_dest
*dest
; /* cached dest_for_instr */
232 unsigned live_start
, live_end
; /* live range */
236 etna_live_defs(nir_function_impl
*impl
, struct live_def
*defs
, unsigned *live_map
);