.alpha_ref = state->alpha_state.ref_value
};
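+ /* Not a blend shader, so pass 0 for the new blend RT argument */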
- midgard_compile_shader_nir(s, &program, false, screen->gpu_id);
+ midgard_compile_shader_nir(s, &program, false, 0, screen->gpu_id);
/* Prepare the compiled binary for upload */
int size = program.compiled.size;
/* Prevent NULL collision issues */
assert(fmt != 0);
- /* Check the cache */
+ /* Check the cache. Key by the RT and format */
struct hash_table_u64 *shaders = blend->rt[rt].shaders;
+ unsigned key = (fmt << 3) | rt;
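+ /* The RT index lives in the low 3 bits of the key, so this assumes at most 8 render targets */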
struct panfrost_blend_shader *shader =
- _mesa_hash_table_u64_search(shaders, fmt);
+ _mesa_hash_table_u64_search(shaders, key);
if (shader)
return shader;
/* Cache miss. Build one instead, cache it, and go */
struct panfrost_blend_shader generated =
- panfrost_compile_blend_shader(ctx, &blend->base, fmt);
+ panfrost_compile_blend_shader(ctx, &blend->base, fmt, rt);
shader = mem_dup(&generated, sizeof(generated));
- _mesa_hash_table_u64_insert(shaders, fmt, shader);
+ _mesa_hash_table_u64_insert(shaders, key, shader);
return shader;
}
panfrost_compile_blend_shader(
struct panfrost_context *ctx,
struct pipe_blend_state *cso,
- enum pipe_format format)
+ enum pipe_format format,
+ unsigned rt)
{
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
struct panfrost_blend_shader res;
/* Compile the built shader */
midgard_program program;
- midgard_compile_shader_nir(shader, &program, true, screen->gpu_id);
+ midgard_compile_shader_nir(shader, &program, true, rt, screen->gpu_id);
/* Allow us to patch later */
res.patch_index = program.blend_patch_offset;
panfrost_compile_blend_shader(
struct panfrost_context *ctx,
struct pipe_blend_state *cso,
- enum pipe_format format);
+ enum pipe_format format,
+ unsigned rt);
#endif
/* Is internally a blend shader? Depends on stage == FRAGMENT */
bool is_blend;
+ /* Render target number for a keyed blend shader. Depends on is_blend */
+ unsigned blend_rt;
+
/* Tracking for blend constant patching */
int blend_constant_offset;
splatter.swizzle[1][c] = 0;
emit_mir_instruction(ctx, splatter);
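+ /* Write to the RT recorded in the context instead of hardcoding RT 0 */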
- emit_fragment_store(ctx, expanded, 0);
+ emit_fragment_store(ctx, expanded, ctx->blend_rt);
} else
- emit_fragment_store(ctx, reg, 0);
+ emit_fragment_store(ctx, reg, ctx->blend_rt);
break;
}
int
-midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend, unsigned gpu_id)
+midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id)
{
struct util_dynarray *compiled = &program->compiled;
ctx->stage = nir->info.stage;
ctx->is_blend = is_blend;
ctx->alpha_ref = program->alpha_ref;
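+ /* Which render target a blend shader writes to; only meaningful when is_blend is set */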
+ ctx->blend_rt = blend_rt;
ctx->quirks = midgard_get_quirks(gpu_id);
/* Start off with a safe cutoff, allowing usage of all 16 work
} midgard_program;
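+ /* blend_rt: render target index, used only when compiling a blend shader (is_blend) */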
int
-midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend, unsigned gpu_id);
+midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id);
/* NIR options are shared between the standalone compiler and the online
* compiler. Defining it here is the simplest, though maybe not the Right