/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */
27 #include "etnaviv_shader.h"
29 #include "etnaviv_compiler.h"
30 #include "etnaviv_context.h"
31 #include "etnaviv_debug.h"
32 #include "etnaviv_screen.h"
33 #include "etnaviv_util.h"
35 #include "tgsi/tgsi_parse.h"
36 #include "nir/tgsi_to_nir.h"
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
40 /* Upload shader code to bo, if not already done */
41 static bool etna_icache_upload_shader(struct etna_context
*ctx
, struct etna_shader_variant
*v
)
45 v
->bo
= etna_bo_new(ctx
->screen
->dev
, v
->code_size
*4, DRM_ETNA_GEM_CACHE_WC
);
49 void *buf
= etna_bo_map(v
->bo
);
50 etna_bo_cpu_prep(v
->bo
, DRM_ETNA_PREP_WRITE
);
51 memcpy(buf
, v
->code
, v
->code_size
*4);
52 etna_bo_cpu_fini(v
->bo
);
53 DBG("Uploaded %s of %u words to bo %p", v
->stage
== MESA_SHADER_FRAGMENT
? "fs":"vs", v
->code_size
, v
->bo
);
57 /* Link vs and fs together: fill in shader_state from vs and fs
58 * as this function is called every time a new fs or vs is bound, the goal is to
59 * do little processing as possible here, and to precompute as much as possible in
60 * the vs/fs shader_object.
62 * XXX we could cache the link result for a certain set of VS/PS; usually a pair
63 * of VS and PS will be used together anyway.
66 etna_link_shaders(struct etna_context
*ctx
, struct compiled_shader_state
*cs
,
67 struct etna_shader_variant
*vs
, struct etna_shader_variant
*fs
)
69 struct etna_shader_link_info link
= { };
72 assert(vs
->stage
== MESA_SHADER_VERTEX
);
73 assert(fs
->stage
== MESA_SHADER_FRAGMENT
);
76 if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS
)) {
77 if (DBG_ENABLED(ETNA_DBG_NIR
)) {
78 etna_dump_shader_nir(vs
);
79 etna_dump_shader_nir(fs
);
87 if (DBG_ENABLED(ETNA_DBG_NIR
))
88 failed
= etna_link_shader_nir(&link
, vs
, fs
);
90 failed
= etna_link_shader(&link
, vs
, fs
);
93 /* linking failed: some fs inputs do not have corresponding
100 if (DBG_ENABLED(ETNA_DBG_LINKER_MSGS
)) {
101 debug_printf("link result:\n");
102 debug_printf(" vs -> fs comps use pa_attr\n");
104 for (int idx
= 0; idx
< link
.num_varyings
; ++idx
)
105 debug_printf(" t%-2u -> t%-2u %-5.*s %u,%u,%u,%u 0x%08x\n",
106 link
.varyings
[idx
].reg
, idx
+ 1,
107 link
.varyings
[idx
].num_components
, "xyzw",
108 link
.varyings
[idx
].use
[0], link
.varyings
[idx
].use
[1],
109 link
.varyings
[idx
].use
[2], link
.varyings
[idx
].use
[3],
110 link
.varyings
[idx
].pa_attributes
);
113 /* set last_varying_2x flag if the last varying has 1 or 2 components */
114 bool last_varying_2x
= false;
115 if (link
.num_varyings
> 0 && link
.varyings
[link
.num_varyings
- 1].num_components
<= 2)
116 last_varying_2x
= true;
118 cs
->RA_CONTROL
= VIVS_RA_CONTROL_UNK0
|
119 COND(last_varying_2x
, VIVS_RA_CONTROL_LAST_VARYING_2X
);
121 cs
->PA_ATTRIBUTE_ELEMENT_COUNT
= VIVS_PA_ATTRIBUTE_ELEMENT_COUNT_COUNT(link
.num_varyings
);
122 for (int idx
= 0; idx
< link
.num_varyings
; ++idx
)
123 cs
->PA_SHADER_ATTRIBUTES
[idx
] = link
.varyings
[idx
].pa_attributes
;
125 cs
->VS_END_PC
= vs
->code_size
/ 4;
126 cs
->VS_OUTPUT_COUNT
= 1 + link
.num_varyings
; /* position + varyings */
128 /* vs outputs (varyings) */
129 DEFINE_ETNA_BITARRAY(vs_output
, 16, 8) = {0};
131 etna_bitarray_set(vs_output
, 8, varid
++, vs
->vs_pos_out_reg
);
132 for (int idx
= 0; idx
< link
.num_varyings
; ++idx
)
133 etna_bitarray_set(vs_output
, 8, varid
++, link
.varyings
[idx
].reg
);
134 if (vs
->vs_pointsize_out_reg
>= 0)
135 etna_bitarray_set(vs_output
, 8, varid
++, vs
->vs_pointsize_out_reg
); /* pointsize is last */
137 for (int idx
= 0; idx
< ARRAY_SIZE(cs
->VS_OUTPUT
); ++idx
)
138 cs
->VS_OUTPUT
[idx
] = vs_output
[idx
];
140 if (vs
->vs_pointsize_out_reg
!= -1) {
141 /* vertex shader outputs point coordinate, provide extra output and make
145 cs
->VS_OUTPUT_COUNT_PSIZE
= cs
->VS_OUTPUT_COUNT
+ 1;
147 /* vertex shader does not output point coordinate, make sure thate
148 * POINT_SIZE_ENABLE is masked
149 * and no extra output is given */
150 cs
->PA_CONFIG
= ~VIVS_PA_CONFIG_POINT_SIZE_ENABLE
;
151 cs
->VS_OUTPUT_COUNT_PSIZE
= cs
->VS_OUTPUT_COUNT
;
154 /* if fragment shader doesn't read pointcoord, disable it */
155 if (link
.pcoord_varying_comp_ofs
== -1)
156 cs
->PA_CONFIG
&= ~VIVS_PA_CONFIG_POINT_SPRITE_ENABLE
;
158 cs
->VS_LOAD_BALANCING
= vs
->vs_load_balancing
;
161 cs
->PS_END_PC
= fs
->code_size
/ 4;
162 cs
->PS_OUTPUT_REG
= fs
->ps_color_out_reg
;
164 VIVS_PS_INPUT_COUNT_COUNT(link
.num_varyings
+ 1) | /* Number of inputs plus position */
165 VIVS_PS_INPUT_COUNT_UNK8(fs
->input_count_unk8
);
166 cs
->PS_TEMP_REGISTER_CONTROL
=
167 VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs
->num_temps
, link
.num_varyings
+ 1));
170 /* Precompute PS_INPUT_COUNT and TEMP_REGISTER_CONTROL in the case of MSAA
171 * mode, avoids some fumbling in sync_context. */
172 cs
->PS_INPUT_COUNT_MSAA
=
173 VIVS_PS_INPUT_COUNT_COUNT(link
.num_varyings
+ 2) | /* MSAA adds another input */
174 VIVS_PS_INPUT_COUNT_UNK8(fs
->input_count_unk8
);
175 cs
->PS_TEMP_REGISTER_CONTROL_MSAA
=
176 VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs
->num_temps
, link
.num_varyings
+ 2));
178 uint32_t total_components
= 0;
179 DEFINE_ETNA_BITARRAY(num_components
, ETNA_NUM_VARYINGS
, 4) = {0};
180 DEFINE_ETNA_BITARRAY(component_use
, 4 * ETNA_NUM_VARYINGS
, 2) = {0};
181 for (int idx
= 0; idx
< link
.num_varyings
; ++idx
) {
182 const struct etna_varying
*varying
= &link
.varyings
[idx
];
184 etna_bitarray_set(num_components
, 4, idx
, varying
->num_components
);
185 for (int comp
= 0; comp
< varying
->num_components
; ++comp
) {
186 etna_bitarray_set(component_use
, 2, total_components
, varying
->use
[comp
]);
187 total_components
+= 1;
191 cs
->GL_VARYING_TOTAL_COMPONENTS
=
192 VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(align(total_components
, 2));
193 cs
->GL_VARYING_NUM_COMPONENTS
= num_components
[0];
194 cs
->GL_VARYING_COMPONENT_USE
[0] = component_use
[0];
195 cs
->GL_VARYING_COMPONENT_USE
[1] = component_use
[1];
197 cs
->GL_HALTI5_SH_SPECIALS
=
198 0x7f7f0000 | /* unknown bits, probably other PS inputs */
199 /* pointsize is last (see above) */
200 VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT((vs
->vs_pointsize_out_reg
!= -1) ?
201 cs
->VS_OUTPUT_COUNT
* 4 : 0x00) |
202 VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN((link
.pcoord_varying_comp_ofs
!= -1) ?
203 link
.pcoord_varying_comp_ofs
: 0x7f);
205 /* mask out early Z bit when frag depth is written */
206 cs
->PE_DEPTH_CONFIG
= ~COND(fs
->ps_depth_out_reg
>= 0, VIVS_PE_DEPTH_CONFIG_EARLY_Z
);
208 /* reference instruction memory */
209 cs
->vs_inst_mem_size
= vs
->code_size
;
210 cs
->VS_INST_MEM
= vs
->code
;
212 cs
->ps_inst_mem_size
= fs
->code_size
;
213 cs
->PS_INST_MEM
= fs
->code
;
215 if (vs
->needs_icache
|| fs
->needs_icache
) {
216 /* If either of the shaders needs ICACHE, we use it for both. It is
217 * either switched on or off for the entire shader processor.
219 if (!etna_icache_upload_shader(ctx
, vs
) ||
220 !etna_icache_upload_shader(ctx
, fs
)) {
225 cs
->VS_INST_ADDR
.bo
= vs
->bo
;
226 cs
->VS_INST_ADDR
.offset
= 0;
227 cs
->VS_INST_ADDR
.flags
= ETNA_RELOC_READ
;
228 cs
->PS_INST_ADDR
.bo
= fs
->bo
;
229 cs
->PS_INST_ADDR
.offset
= 0;
230 cs
->PS_INST_ADDR
.flags
= ETNA_RELOC_READ
;
233 memset(&cs
->VS_INST_ADDR
, 0, sizeof(cs
->VS_INST_ADDR
));
234 memset(&cs
->PS_INST_ADDR
, 0, sizeof(cs
->PS_INST_ADDR
));
241 etna_shader_link(struct etna_context
*ctx
)
243 if (!ctx
->shader
.vs
|| !ctx
->shader
.fs
)
246 /* re-link vs and fs if needed */
247 return etna_link_shaders(ctx
, &ctx
->shader_state
, ctx
->shader
.vs
, ctx
->shader
.fs
);
251 etna_shader_update_vs_inputs(struct compiled_shader_state
*cs
,
252 const struct etna_shader_variant
*vs
,
253 const struct compiled_vertex_elements_state
*ves
)
255 unsigned num_temps
, cur_temp
, num_vs_inputs
;
260 /* Number of vertex elements determines number of VS inputs. Otherwise,
261 * the GPU crashes. Allocate any unused vertex elements to VS temporary
263 num_vs_inputs
= MAX2(ves
->num_elements
, vs
->infile
.num_reg
);
264 if (num_vs_inputs
!= ves
->num_elements
) {
265 BUG("Number of elements %u does not match the number of VS inputs %zu",
266 ves
->num_elements
, vs
->infile
.num_reg
);
270 cur_temp
= vs
->num_temps
;
271 num_temps
= num_vs_inputs
- vs
->infile
.num_reg
+ cur_temp
;
273 cs
->VS_INPUT_COUNT
= VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs
) |
274 VIVS_VS_INPUT_COUNT_UNK8(vs
->input_count_unk8
);
275 cs
->VS_TEMP_REGISTER_CONTROL
=
276 VIVS_VS_TEMP_REGISTER_CONTROL_NUM_TEMPS(num_temps
);
278 /* vs inputs (attributes) */
279 DEFINE_ETNA_BITARRAY(vs_input
, 16, 8) = {0};
280 for (int idx
= 0; idx
< num_vs_inputs
; ++idx
) {
281 if (idx
< vs
->infile
.num_reg
)
282 etna_bitarray_set(vs_input
, 8, idx
, vs
->infile
.reg
[idx
].reg
);
284 etna_bitarray_set(vs_input
, 8, idx
, cur_temp
++);
287 if (vs
->vs_id_in_reg
>= 0) {
288 cs
->VS_INPUT_COUNT
= VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs
+ 1) |
289 VIVS_VS_INPUT_COUNT_UNK8(vs
->input_count_unk8
) |
290 VIVS_VS_INPUT_COUNT_ID_ENABLE
;
292 etna_bitarray_set(vs_input
, 8, num_vs_inputs
, vs
->vs_id_in_reg
);
294 cs
->FE_HALTI5_ID_CONFIG
=
295 VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_ENABLE
|
296 VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_ENABLE
|
297 VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_REG(vs
->vs_id_in_reg
* 4) |
298 VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_REG(vs
->vs_id_in_reg
* 4 + 1);
301 for (int idx
= 0; idx
< ARRAY_SIZE(cs
->VS_INPUT
); ++idx
)
302 cs
->VS_INPUT
[idx
] = vs_input
[idx
];
307 static inline const char *
308 etna_shader_stage(struct etna_shader_variant
*shader
)
310 switch (shader
->stage
) {
311 case MESA_SHADER_VERTEX
: return "VERT";
312 case MESA_SHADER_FRAGMENT
: return "FRAG";
313 case MESA_SHADER_COMPUTE
: return "CL";
315 unreachable("invalid type");
321 dump_shader_info(struct etna_shader_variant
*v
, struct pipe_debug_callback
*debug
)
323 if (!unlikely(etna_mesa_debug
& ETNA_DBG_SHADERDB
))
326 pipe_debug_message(debug
, SHADER_INFO
,
327 "%s shader: %u instructions, %u temps, "
328 "%u immediates, %u loops",
329 etna_shader_stage(v
),
332 v
->uniforms
.imm_count
,
337 etna_shader_update_vertex(struct etna_context
*ctx
)
339 return etna_shader_update_vs_inputs(&ctx
->shader_state
, ctx
->shader
.vs
,
340 ctx
->vertex_elements
);
343 static struct etna_shader_variant
*
344 create_variant(struct etna_shader
*shader
, struct etna_shader_key key
)
346 struct etna_shader_variant
*v
= CALLOC_STRUCT(etna_shader_variant
);
355 ret
= etna_compile_shader(v
);
357 debug_error("compile failed!");
361 v
->id
= ++shader
->variant_count
;
370 struct etna_shader_variant
*
371 etna_shader_variant(struct etna_shader
*shader
, struct etna_shader_key key
,
372 struct pipe_debug_callback
*debug
)
374 struct etna_shader_variant
*v
;
376 for (v
= shader
->variants
; v
; v
= v
->next
)
377 if (etna_shader_key_equal(&key
, &v
->key
))
380 /* compile new variant if it doesn't exist already */
381 v
= create_variant(shader
, key
);
383 v
->next
= shader
->variants
;
384 shader
->variants
= v
;
385 dump_shader_info(v
, debug
);
392 etna_create_shader_state(struct pipe_context
*pctx
,
393 const struct pipe_shader_state
*pss
)
395 struct etna_context
*ctx
= etna_context(pctx
);
396 struct etna_shader
*shader
= CALLOC_STRUCT(etna_shader
);
403 shader
->specs
= &ctx
->specs
;
405 if (DBG_ENABLED(ETNA_DBG_NIR
))
406 shader
->nir
= (pss
->type
== PIPE_SHADER_IR_NIR
) ? pss
->ir
.nir
:
407 tgsi_to_nir(pss
->tokens
, pctx
->screen
);
409 shader
->tokens
= tgsi_dup_tokens(pss
->tokens
);
413 if (etna_mesa_debug
& ETNA_DBG_SHADERDB
) {
414 /* if shader-db run, create a standard variant immediately
415 * (as otherwise nothing will trigger the shader to be
416 * actually compiled).
418 struct etna_shader_key key
= {};
419 etna_shader_variant(shader
, key
, &ctx
->debug
);
426 etna_delete_shader_state(struct pipe_context
*pctx
, void *ss
)
428 struct etna_shader
*shader
= ss
;
429 struct etna_shader_variant
*v
, *t
;
431 v
= shader
->variants
;
438 if (DBG_ENABLED(ETNA_DBG_NIR
))
439 etna_destroy_shader_nir(t
);
441 etna_destroy_shader(t
);
444 ralloc_free(shader
->nir
);
449 etna_bind_fs_state(struct pipe_context
*pctx
, void *hwcso
)
451 struct etna_context
*ctx
= etna_context(pctx
);
453 ctx
->shader
.bind_fs
= hwcso
;
454 ctx
->dirty
|= ETNA_DIRTY_SHADER
;
458 etna_bind_vs_state(struct pipe_context
*pctx
, void *hwcso
)
460 struct etna_context
*ctx
= etna_context(pctx
);
462 ctx
->shader
.bind_vs
= hwcso
;
463 ctx
->dirty
|= ETNA_DIRTY_SHADER
;
467 etna_shader_init(struct pipe_context
*pctx
)
469 pctx
->create_fs_state
= etna_create_shader_state
;
470 pctx
->bind_fs_state
= etna_bind_fs_state
;
471 pctx
->delete_fs_state
= etna_delete_shader_state
;
472 pctx
->create_vs_state
= etna_create_shader_state
;
473 pctx
->bind_vs_state
= etna_bind_vs_state
;
474 pctx
->delete_vs_state
= etna_delete_shader_state
;