#include "program/programopt.h"
#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_serialize.h"
#include "draw/draw_context.h"
#include "pipe/p_context.h"
delete_variant(struct st_context *st, struct st_variant *v, GLenum target)
{
if (v->driver_shader) {
- if (st->has_shareable_shaders || v->st == st) {
+ if (target == GL_VERTEX_PROGRAM_ARB &&
+ ((struct st_common_variant*)v)->key.is_draw_shader) {
+ /* Draw shader. */
+ draw_delete_vertex_shader(st->draw, v->driver_shader);
+ } else if (st->has_shareable_shaders || v->st == st) {
/* The shader's context matches the calling context, or we
* don't care.
*/
switch (target) {
case GL_VERTEX_PROGRAM_ARB:
- cso_delete_vertex_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_vs_state(st->pipe, v->driver_shader);
break;
case GL_TESS_CONTROL_PROGRAM_NV:
- cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_tcs_state(st->pipe, v->driver_shader);
break;
case GL_TESS_EVALUATION_PROGRAM_NV:
- cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_tes_state(st->pipe, v->driver_shader);
break;
case GL_GEOMETRY_PROGRAM_NV:
- cso_delete_geometry_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_gs_state(st->pipe, v->driver_shader);
break;
case GL_FRAGMENT_PROGRAM_ARB:
- cso_delete_fragment_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_fs_state(st->pipe, v->driver_shader);
break;
case GL_COMPUTE_PROGRAM_NV:
- cso_delete_compute_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_compute_state(st->pipe, v->driver_shader);
break;
default:
unreachable("bad shader type in delete_basic_variant");
free(v);
}
+/* Unbind "p" from the cso_context slot of its shader stage and set the
+ * matching ST_NEW_* dirty bit so st/mesa re-binds the correct shader on
+ * the next state validation.  Needed before deleting variants, because
+ * we don't track which variant the driver currently has bound.
+ */
+static void
+st_unbind_program(struct st_context *st, struct st_program *p)
+{
+ /* Unbind the shader in cso_context and re-bind in st/mesa. */
+ switch (p->Base.info.stage) {
+ case MESA_SHADER_VERTEX:
+ cso_set_vertex_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_VS_STATE;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ cso_set_tessctrl_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_TCS_STATE;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ cso_set_tesseval_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_TES_STATE;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ cso_set_geometry_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_GS_STATE;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ cso_set_fragment_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_FS_STATE;
+ break;
+ case MESA_SHADER_COMPUTE:
+ cso_set_compute_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_CS_STATE;
+ break;
+ default:
+ unreachable("invalid shader type");
+ }
+}
/**
* Free all basic program variants.
{
struct st_variant *v;
+ /* If we are releasing shaders, re-bind them, because we don't
+ * know which shaders are bound in the driver.
+ */
+ if (p->variants)
+ st_unbind_program(st, p);
+
for (v = p->variants; v; ) {
struct st_variant *next = v->next;
delete_variant(st, v, p->Base.Target);
if (stp->Base.Parameters->NumParameters)
stp->affected_states |= ST_NEW_VS_CONSTANTS;
- /* No samplers are allowed in ARB_vp. */
+ /* Translate to NIR if preferred. */
+ if (st->pipe->screen->get_shader_param(st->pipe->screen,
+ PIPE_SHADER_VERTEX,
+ PIPE_SHADER_CAP_PREFERRED_IR)) {
+ assert(!stp->glsl_to_tgsi);
+
+ if (stp->Base.nir)
+ ralloc_free(stp->Base.nir);
+
+ if (stp->serialized_nir) {
+ free(stp->serialized_nir);
+ stp->serialized_nir = NULL;
+ }
+
+ stp->state.type = PIPE_SHADER_IR_NIR;
+ stp->Base.nir = st_translate_prog_to_nir(st, &stp->Base,
+ MESA_SHADER_VERTEX);
+ /* For st_draw_feedback, we need to generate TGSI too if draw doesn't
+ * use LLVM.
+ */
+ if (draw_has_llvm())
+ return true;
+ }
}
/* Get semantic names and indices. */
st_store_ir_in_disk_cache(st, &stp->Base, false);
}
- /* Translate to NIR.
- *
- * This must be done after the translation to TGSI is done, because
- * we'll pass the NIR shader to the driver and the TGSI version to
- * the draw module for the select/feedback/rasterpos code.
- */
- if (st->pipe->screen->get_shader_param(st->pipe->screen,
- PIPE_SHADER_VERTEX,
- PIPE_SHADER_CAP_PREFERRED_IR)) {
- assert(!stp->glsl_to_tgsi);
-
- nir_shader *nir =
- st_translate_prog_to_nir(st, &stp->Base, MESA_SHADER_VERTEX);
-
- if (stp->Base.nir)
- ralloc_free(stp->Base.nir);
- stp->state.type = PIPE_SHADER_IR_NIR;
- stp->Base.nir = nir;
- return true;
+ return stp->state.tokens != NULL;
+}
+
+/* Return a NIR shader for building a shader variant.  The first caller
+ * receives stp->Base.nir itself (ownership transfer, no clone); every
+ * subsequent caller gets a fresh copy deserialized from
+ * stp->serialized_nir, which saves keeping a live NIR copy in memory.
+ */
+static struct nir_shader *
+get_nir_shader(struct st_context *st, struct st_program *stp)
+{
+ if (stp->Base.nir) {
+ nir_shader *nir = stp->Base.nir;
+
+ /* The first shader variant takes ownership of NIR, so that there is
+ * no cloning. Additional shader variants are always generated from
+ * serialized NIR to save memory.
+ */
+ stp->Base.nir = NULL;
+ assert(stp->serialized_nir && stp->serialized_nir_size);
+ return nir;
}
- return stp->state.tokens != NULL;
+ /* Rebuild a fresh NIR shader from the serialized blob. */
+ struct blob_reader blob_reader;
+ const struct nir_shader_compiler_options *options =
+ st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;
+
+ blob_reader_init(&blob_reader, stp->serialized_nir, stp->serialized_nir_size);
+ return nir_deserialize(NULL, options, &blob_reader);
}
static const gl_state_index16 depth_range_state[STATE_LENGTH] =
state.stream_output = stvp->state.stream_output;
- if (stvp->state.type == PIPE_SHADER_IR_NIR) {
+ if (stvp->state.type == PIPE_SHADER_IR_NIR &&
+ (!key->is_draw_shader || draw_has_llvm())) {
bool finalize = false;
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, stvp->Base.nir);
+ state.ir.nir = get_nir_shader(st, stvp);
if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
finalize = true;
else
vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);
+ if (state.tokens) {
+ tgsi_free_tokens(state.tokens);
+ }
+
return vpv;
}
if (stfp->Base.nir)
ralloc_free(stfp->Base.nir);
+ if (stfp->serialized_nir) {
+ free(stfp->serialized_nir);
+ stfp->serialized_nir = NULL;
+ }
stfp->state.type = PIPE_SHADER_IR_NIR;
stfp->Base.nir = nir;
return true;
bool finalize = false;
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, stfp->Base.nir);
+ state.ir.nir = get_nir_shader(st, stfp);
if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
key->external.lower_ayuv || key->external.lower_xyuv)) {
+
+ st_nir_lower_samplers(pipe->screen, state.ir.nir,
+ stfp->shader_program, &stfp->Base);
+
nir_lower_tex_options options = {0};
options.lower_y_uv_external = key->external.lower_nv12;
options.lower_y_u_v_external = key->external.lower_iyuv;
/* This pass needs to happen *after* nir_lower_sampler */
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
- key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
+ key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
+ key->external.lower_ayuv || key->external.lower_xyuv)) {
NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 || key->external.lower_xy_uxvx ||
bool finalize = false;
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, prog->Base.nir);
+ state.ir.nir = get_nir_shader(st, prog);
if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
struct st_program *p = st_program(target);
struct st_variant *v, **prevPtr = &p->variants;
+ bool unbound = false;
for (v = p->variants; v; ) {
struct st_variant *next = v->next;
if (v->st == st) {
+ if (!unbound) {
+ st_unbind_program(st, p);
+ unbound = true;
+ }
+
/* unlink from list */
*prevPtr = next;
/* destroy this variant */
}
}
+/* Cache a serialized copy of stp->Base.nir in stp->serialized_nir /
+ * stp->serialized_nir_size, so later shader variants can be rebuilt by
+ * deserialization (see get_nir_shader) instead of cloning the NIR.
+ * No-op if a serialized copy already exists.  The blob buffer is
+ * heap-allocated; owners free it with free().
+ */
+void
+st_serialize_nir(struct st_program *stp)
+{
+ if (!stp->serialized_nir) {
+ struct blob blob;
+ size_t size;
+
+ blob_init(&blob);
+ nir_serialize(&blob, stp->Base.nir, false);
+ blob_finish_get_buffer(&blob, &stp->serialized_nir, &size);
+ stp->serialized_nir_size = size;
+ }
+}
+
void
st_finalize_program(struct st_context *st, struct gl_program *prog)
{
st->dirty |= ((struct st_program *)prog)->affected_states;
}
- if (prog->nir)
+ if (prog->nir) {
nir_sweep(prog->nir);
+ /* This is only needed for ARB_vp/fp programs and when the disk cache
+ * is disabled. If the disk cache is enabled, GLSL programs are
+ * serialized in write_nir_to_cache.
+ */
+ st_serialize_nir(st_program(prog));
+ }
+
/* Create Gallium shaders now instead of on demand. */
if (ST_DEBUG & DEBUG_PRECOMPILE ||
st->shader_has_one_variant[prog->info.stage])