#include "etnaviv_compiler.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
+#include "etnaviv_screen.h"
#include "etnaviv_util.h"
#include "tgsi/tgsi_parse.h"
+#include "nir/tgsi_to_nir.h"
#include "util/u_math.h"
#include "util/u_memory.h"
+/* Upload shader code to bo, if not already done.
+ * The bo is cached on the variant (v->bo); subsequent calls are no-ops.
+ * Returns false on allocation/mapping failure, leaving v->bo NULL so a
+ * later call can retry. */
+static bool etna_icache_upload_shader(struct etna_context *ctx, struct etna_shader_variant *v)
+{
+ if (v->bo)
+ return true;
+
+ /* code_size is in 32-bit instruction words, hence *4 for bytes. */
+ v->bo = etna_bo_new(ctx->screen->dev, v->code_size*4, DRM_ETNA_GEM_CACHE_WC);
+ if (!v->bo)
+ return false;
+
+ void *buf = etna_bo_map(v->bo);
+ if (!buf) {
+ /* Mapping failed: drop the bo so we do not cache a half-initialized
+ * (unwritten) shader buffer on the variant. */
+ etna_bo_del(v->bo);
+ v->bo = NULL;
+ return false;
+ }
+ etna_bo_cpu_prep(v->bo, DRM_ETNA_PREP_WRITE);
+ memcpy(buf, v->code, v->code_size*4);
+ etna_bo_cpu_fini(v->bo);
+ DBG("Uploaded %s of %u words to bo %p", v->stage == MESA_SHADER_FRAGMENT ? "fs":"vs", v->code_size, v->bo);
+ return true;
+}
+
/* Link vs and fs together: fill in shader_state from vs and fs
 * as this function is called every time a new fs or vs is bound, the goal is to
 * do little processing as possible here, and to precompute as much as possible in
 */
static bool
etna_link_shaders(struct etna_context *ctx, struct compiled_shader_state *cs,
- const struct etna_shader_variant *vs, const struct etna_shader_variant *fs)
+ struct etna_shader_variant *vs, struct etna_shader_variant *fs)
{
struct etna_shader_link_info link = { };
+ /* Variants now carry a gl_shader_stage rather than a PIPE_SHADER_* enum. */
- assert(vs->processor == PIPE_SHADER_VERTEX);
- assert(fs->processor == PIPE_SHADER_FRAGMENT);
+ assert(vs->stage == MESA_SHADER_VERTEX);
+ assert(fs->stage == MESA_SHADER_FRAGMENT);
#ifdef DEBUG
if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS)) {
- etna_dump_shader(vs);
- etna_dump_shader(fs);
+ /* NIR-compiled variants need the NIR-aware dumper. */
+ if (DBG_ENABLED(ETNA_DBG_NIR)) {
+ etna_dump_shader_nir(vs);
+ etna_dump_shader_nir(fs);
+ } else {
+ etna_dump_shader(vs);
+ etna_dump_shader(fs);
+ }
}
#endif
cs->GL_VARYING_COMPONENT_USE[0] = component_use[0];
cs->GL_VARYING_COMPONENT_USE[1] = component_use[1];
+ /* HALTI5 packs point size output / point coord input registers into a
+ * dedicated "SH_SPECIALS" register; 0x7f means "not present". */
+ cs->GL_HALTI5_SH_SPECIALS =
+ 0x7f7f0000 | /* unknown bits, probably other PS inputs */
+ /* pointsize is last (see above) */
+ VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT((vs->vs_pointsize_out_reg != -1) ?
+ cs->VS_OUTPUT_COUNT * 4 : 0x00) |
+ VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN((link.pcoord_varying_comp_ofs != -1) ?
+ link.pcoord_varying_comp_ofs : 0x7f);
+
/* reference instruction memory */
cs->vs_inst_mem_size = vs->code_size;
cs->VS_INST_MEM = vs->code;
+
cs->ps_inst_mem_size = fs->code_size;
cs->PS_INST_MEM = fs->code;
+ /* NOTE(review): bitwise | is correct only because needs_icache is a
+ * normalized bool; || would make the intent clearer. */
+ if (vs->needs_icache | fs->needs_icache) {
+ /* If either of the shaders needs ICACHE, we use it for both. It is
+ * either switched on or off for the entire shader processor.
+ */
+ if (!etna_icache_upload_shader(ctx, vs) ||
+ !etna_icache_upload_shader(ctx, fs)) {
+ assert(0);
+ return false;
+ }
+
+ /* Point the instruction-fetch relocs at the uploaded bos. */
+ cs->VS_INST_ADDR.bo = vs->bo;
+ cs->VS_INST_ADDR.offset = 0;
+ cs->VS_INST_ADDR.flags = ETNA_RELOC_READ;
+ cs->PS_INST_ADDR.bo = fs->bo;
+ cs->PS_INST_ADDR.offset = 0;
+ cs->PS_INST_ADDR.flags = ETNA_RELOC_READ;
+ } else {
+ /* clear relocs */
+ memset(&cs->VS_INST_ADDR, 0, sizeof(cs->VS_INST_ADDR));
+ memset(&cs->PS_INST_ADDR, 0, sizeof(cs->PS_INST_ADDR));
+ }
+
return true;
}
}
+/* The context parameter was unused apart from reaching the same data the
+ * callers already pass in explicitly, so it is dropped. */
static bool
-etna_shader_update_vs_inputs(struct etna_context *ctx,
- struct compiled_shader_state *cs,
+etna_shader_update_vs_inputs(struct compiled_shader_state *cs,
const struct etna_shader_variant *vs,
const struct compiled_vertex_elements_state *ves)
{
num_vs_inputs = MAX2(ves->num_elements, vs->infile.num_reg);
if (num_vs_inputs != ves->num_elements) {
+ /* Report against the passed-in state, not via ctx. */
BUG("Number of elements %u does not match the number of VS inputs %zu",
- ctx->vertex_elements->num_elements, ctx->shader.vs->infile.num_reg);
+ ves->num_elements, vs->infile.num_reg);
return false;
}
+/* Human-readable tag for a variant's stage, used by the shader-db output. */
static inline const char *
etna_shader_stage(struct etna_shader_variant *shader)
{
- switch (shader->processor) {
- case PIPE_SHADER_VERTEX: return "VERT";
- case PIPE_SHADER_FRAGMENT: return "FRAG";
- case PIPE_SHADER_COMPUTE: return "CL";
+ switch (shader->stage) {
+ case MESA_SHADER_VERTEX: return "VERT";
+ case MESA_SHADER_FRAGMENT: return "FRAG";
+ case MESA_SHADER_COMPUTE: return "CL";
default:
unreachable("invalid type");
return NULL;
}
+/* Emit shader-db statistics for a freshly compiled variant; no-op unless
+ * ETNA_DBG_SHADERDB is enabled. Identifies the variant as
+ * "<shader id>/<variant id>" now that one CSO can own several variants. */
static void
-dump_shader_info(struct etna_shader_variant *shader, struct pipe_debug_callback *debug)
+dump_shader_info(struct etna_shader_variant *v, struct pipe_debug_callback *debug)
{
if (!unlikely(etna_mesa_debug & ETNA_DBG_SHADERDB))
return;
pipe_debug_message(debug, SHADER_INFO, "\n"
- "SHADER-DB: %s prog %d: %u instructions %u temps\n"
- "SHADER-DB: %s prog %d: %u immediates %u consts\n"
- "SHADER-DB: %s prog %d: %u loops\n",
- etna_shader_stage(shader),
- shader->id,
- shader->code_size,
- shader->num_temps,
- etna_shader_stage(shader),
- shader->id,
- shader->uniforms.imm_count,
- shader->uniforms.const_count,
- etna_shader_stage(shader),
- shader->id,
- shader->num_loops);
+ "SHADER-DB: %s prog %d/%d: %u instructions %u temps\n"
+ "SHADER-DB: %s prog %d/%d: %u immediates %u loops\n",
+ etna_shader_stage(v),
+ v->shader->id, v->id,
+ v->code_size,
+ v->num_temps,
+ etna_shader_stage(v),
+ v->shader->id, v->id,
+ v->uniforms.imm_count,
+ v->num_loops);
}
+/* Validate the currently bound VS against the bound vertex-elements state. */
bool
etna_shader_update_vertex(struct etna_context *ctx)
{
- return etna_shader_update_vs_inputs(ctx, &ctx->shader_state, ctx->shader.vs,
+ return etna_shader_update_vs_inputs(&ctx->shader_state, ctx->shader.vs,
ctx->vertex_elements);
}
+/* Allocate and compile a single variant of 'shader' specialized for 'key'.
+ * Returns NULL on allocation or compile failure. */
static struct etna_shader_variant *
-create_variant(struct etna_shader *shader)
+create_variant(struct etna_shader *shader, struct etna_shader_key key)
{
struct etna_shader_variant *v = CALLOC_STRUCT(etna_shader_variant);
int ret;
return NULL;
v->shader = shader;
+ /* Stash the key so etna_shader_variant() can find this variant again. */
+ v->key = key;
ret = etna_compile_shader(v);
if (!ret) {
return NULL;
}
+/* Return the variant of 'shader' matching 'key', compiling and caching a
+ * new one on first use. The variant list is a simple singly-linked list
+ * searched linearly; new variants are prepended. Returns NULL if
+ * compilation fails. */
+struct etna_shader_variant *
+etna_shader_variant(struct etna_shader *shader, struct etna_shader_key key,
+ struct pipe_debug_callback *debug)
+{
+ struct etna_shader_variant *v;
+
+ for (v = shader->variants; v; v = v->next)
+ if (etna_shader_key_equal(&key, &v->key))
+ return v;
+
+ /* compile new variant if it doesn't exist already */
+ v = create_variant(shader, key);
+ if (v) {
+ v->next = shader->variants;
+ shader->variants = v;
+ /* Only emits output when shader-db debugging is enabled. */
+ dump_shader_info(v, debug);
+ }
+
+ return v;
+}
+
+
+/* CSO create hook: the CSO is now the uncompiled etna_shader container;
+ * actual variants are compiled lazily per key via etna_shader_variant(). */
static void *
etna_create_shader_state(struct pipe_context *pctx,
const struct pipe_shader_state *pss)
static uint32_t id;
shader->id = id++;
shader->specs = &ctx->specs;
- shader->tokens = tgsi_dup_tokens(pss->tokens);
- /* compile new variant */
- struct etna_shader_variant *v;
+ /* NOTE(review): if pss->type == PIPE_SHADER_IR_NIR while ETNA_DBG_NIR is
+ * off, the else branch would read pss->tokens from a NIR shader state —
+ * presumably state trackers only hand us NIR when we advertise it; verify. */
+ if (DBG_ENABLED(ETNA_DBG_NIR))
+ shader->nir = (pss->type == PIPE_SHADER_IR_NIR) ? pss->ir.nir :
+ tgsi_to_nir(pss->tokens, pctx->screen);
+ else
+ shader->tokens = tgsi_dup_tokens(pss->tokens);
- v = create_variant(shader);
- if (v) {
- v->next = shader->variants;
- shader->variants = v;
- dump_shader_info(v, &ctx->debug);
+
+
+ if (etna_mesa_debug & ETNA_DBG_SHADERDB) {
+ /* if shader-db run, create a standard variant immediately
+ * (as otherwise nothing will trigger the shader to be
+ * actually compiled).
+ */
+ struct etna_shader_key key = {};
+ etna_shader_variant(shader, key, &ctx->debug);
}
- return v;
+ return shader;
}
+/* CSO delete hook: tear down every cached variant and the IR copy. */
static void
while (v) {
t = v;
v = v->next;
+ /* Free the icache backing bo, if one was ever uploaded for this variant. */
+ if (t->bo)
+ etna_bo_del(t->bo);
etna_destroy_shader(t);
}
+ /* Exactly one of tokens/nir is set depending on ETNA_DBG_NIR; free both
+ * unconditionally (FREE/ralloc_free are no-ops on NULL) so the TGSI token
+ * copy does not leak when the NIR path is disabled. */
FREE(shader->tokens);
+ ralloc_free(shader->nir);
FREE(shader);
}
+/* Bind hook: the CSO is now the uncompiled shader container, so just
+ * remember it and mark shaders dirty — variant selection happens later
+ * (presumably at draw time via the shader key; no compiled variant exists
+ * to assert a stage on here). */
static void
-etna_bind_fs_state(struct pipe_context *pctx, void *fss_)
+etna_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
struct etna_context *ctx = etna_context(pctx);
- struct etna_shader_variant *fss = fss_;
- if (ctx->shader.fs == fss) /* skip if already bound */
- return;
-
- assert(fss == NULL || fss->processor == PIPE_SHADER_FRAGMENT);
- ctx->shader.fs = fss;
+ ctx->shader.bind_fs = hwcso;
ctx->dirty |= ETNA_DIRTY_SHADER;
}
+/* Bind hook for the vertex shader CSO; mirrors etna_bind_fs_state: store
+ * the container and flag the shader state dirty for later variant lookup. */
static void
-etna_bind_vs_state(struct pipe_context *pctx, void *vss_)
+etna_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
struct etna_context *ctx = etna_context(pctx);
- struct etna_shader_variant *vss = vss_;
-
- if (ctx->shader.vs == vss) /* skip if already bound */
- return;
- assert(vss == NULL || vss->processor == PIPE_SHADER_VERTEX);
- ctx->shader.vs = vss;
+ ctx->shader.bind_vs = hwcso;
ctx->dirty |= ETNA_DIRTY_SHADER;
}