struct ra_graph;
-struct ra_graph* allocate_registers(compiler_context *ctx);
+struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
void install_registers(compiler_context *ctx, struct ra_graph *g);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
bool mir_has_multiple_writes(compiler_context *ctx, int src);
* by install_registers */
struct ra_graph *
-allocate_registers(compiler_context *ctx)
+allocate_registers(compiler_context *ctx, bool *spilled)
{
/* The number of vec4 work registers available depends on when the
* uniforms start, so compute that first */
}
}
- if (!ra_allocate(g)) {
- unreachable("Error allocating registers\n");
- }
-
/* Cleanup */
free(live_start);
free(live_end);
+ if (!ra_allocate(g)) {
+ *spilled = true;
+ return NULL;
+ }
+
return g;
}
void
schedule_program(compiler_context *ctx)
{
        struct ra_graph *g = NULL;
        bool spilled = false;

        /* Bound the spill/retry loop so a pathological shader cannot hang
         * the compiler indefinitely. */
        int iter_count = 10;

        do {
                /* allocate_registers() only ever sets *spilled on failure;
                 * it is never cleared on success. Reset it here each
                 * iteration, otherwise one early spill makes every later
                 * successful allocation still look like a spill and we
                 * burn all iterations before panicking spuriously. */
                spilled = false;

                /* We would like to run RA after scheduling, but spilling
                 * can complicate this */
                mir_foreach_block(ctx, block) {
                        schedule_block(ctx, block);
                }

                /* Pipeline registers creation is a prepass before RA */
                mir_create_pipeline_registers(ctx);

                /* On failure, allocate_registers() returns NULL and flags
                 * spilled so we go around again. */
                g = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        /* Judge the outcome by the final attempt, not by the iteration
         * counter: the post-decrement above means iter_count can reach 0
         * on the very attempt that succeeds, which would previously
         * report a false failure. */
        if (spilled) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        install_registers(ctx, g);
}