t.polygon_list_size = panfrost_tiler_full_size(
width, height, t.hierarchy_mask, hierarchy);
- /* Sanity check */
-
if (vertex_count) {
- struct panfrost_bo *tiler_heap;
-
- tiler_heap = panfrost_batch_get_tiler_heap(batch);
t.polygon_list = panfrost_batch_get_polygon_list(batch,
header_size +
t.polygon_list_size);
- /* Allow the entire tiler heap */
- t.heap_start = tiler_heap->gpu;
- t.heap_end = tiler_heap->gpu + tiler_heap->size;
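+ /* Allow the job to use the entire device-shared tiler heap */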
+ t.heap_start = device->tiler_heap->gpu;
+ t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size;
} else {
struct panfrost_bo *tiler_dummy;
* fragment jobs.
*/
struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
-
- panfrost_batch_add_fbo_bos(batch);
panfrost_batch_clear(batch, buffers, color, depth, stencil);
}
-/* Reset per-frame context, called on context initialisation as well as after
- * flushing a frame */
-
-void
-panfrost_invalidate_frame(struct panfrost_context *ctx)
-{
- /* TODO: When does this need to be handled? */
- ctx->active_queries = true;
-}
-
bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS;
}
-/* Compute number of UBOs active (more specifically, compute the highest UBO
- * number addressable -- if there are gaps, include them in the count anyway).
- * We always include UBO #0 in the count, since we *need* uniforms enabled for
- * sysvals. */
-
-unsigned
-panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
-{
- unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
- return 32 - __builtin_clz(mask);
-}
-
/* The entire frame is in memory -- send it off to the kernel! */
void
/* Check if we're scissoring at all */
- if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
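+ /* A rasterizer is always bound at draw time, so no NULL check is needed */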
+ if (!ctx->rasterizer->base.scissor)
return false;
return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
assert(ctx->rasterizer != NULL);
if (!(ctx->draw_modes & (1 << mode))) {
- if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) {
- mode = PIPE_PRIM_TRIANGLE_FAN;
- } else {
- if (info->count < 4) {
- /* Degenerate case? */
- return;
- }
-
- util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
- util_primconvert_draw_vbo(ctx->primconvert, info);
+ if (info->count < 4) {
+ /* Degenerate case: too few vertices for the conversion path, so drop the draw */
return;
}
+
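+ /* Fall back to util_primconvert for primitive types the hardware can't draw natively */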
+ util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base);
+ util_primconvert_draw_vbo(ctx->primconvert, info);
+ return;
}
- /* Now that we have a guaranteed terminating path, find the job.
- * Assignment commented out to prevent unused warning */
+ /* Now that we have a guaranteed terminating path, find the job. */
struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
-
- panfrost_batch_add_fbo_bos(batch);
panfrost_batch_set_requirements(batch);
/* Take into account a negative bias */
panfrost_statistics_record(ctx, info);
- /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
- * vertex_count, 1) */
-
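+ /* Dispatch the fused vertex/tiler jobs as a (1, vertex_count, instance_count) grid of unit workgroups */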
panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix,
1, vertex_count, info->instance_count,
1, 1, 1);
ctx->instance_count,
&vertex_postfix, &tiler_postfix,
&primitive_size);
- panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
- panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix);
panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix);
panfrost_emit_viewport(batch, &tiler_postfix);
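+ /* The vertex job takes a compute-style shader descriptor; the tiler job needs the specialized fragment one */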
+ vertex_postfix.shader = panfrost_emit_compute_shader_meta(batch, PIPE_SHADER_VERTEX);
+ tiler_postfix.shader = panfrost_emit_frag_shader_meta(batch);
+
panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size);
/* Fire off the draw itself */
so->base = *cso;
+ /* Guaranteed by the core GL call, so don't expose ARB_polygon_offset_clamp */
+ assert(cso->offset_clamp == 0.0);
+
return so;
}
if (!hwcso)
return;
- /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */
- assert(ctx->rasterizer->base.offset_clamp == 0.0);
-
/* Point sprites are emulated */
struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
struct panfrost_context *ctx = pan_context(pctx);
- struct panfrost_shader_state state;
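+ /* Zero-initialize so the throwaway precompile variant carries no uninitialized state */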
+ struct panfrost_shader_state state = { 0 };
uint64_t outputs_written;
panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR,
for (unsigned i = 0; i < cso->variant_count; ++i) {
struct panfrost_shader_state *shader_state = &cso->variants[i];
panfrost_bo_unreference(shader_state->bo);
+
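+ /* pipe_resource_reference is NULL-safe, so the descriptor upload can be released unconditionally */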
+ pipe_resource_reference(&shader_state->upload.rsrc, NULL);
+
shader_state->bo = NULL;
}
free(cso->variants);
+
free(so);
}
util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
ctx->batch = NULL;
- panfrost_invalidate_frame(ctx);
/* We may need to generate a new variant if the fragment shader is
* keyed to the framebuffer format (due to EXT_framebuffer_fetch) */
util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
u_upload_destroy(pipe->stream_uploader);
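+ /* Tear down the state uploader created at context initialization */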
+ u_upload_destroy(panfrost->state_uploader);
ralloc_free(pipe);
}
gallium->stream_uploader = u_upload_create_default(gallium);
gallium->const_uploader = gallium->stream_uploader;
- assert(gallium->stream_uploader);
+
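+ /* Dedicated uploader for pipelined GPU state such as shader descriptors */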
+ ctx->state_uploader = u_upload_create(gallium, 4096,
+ PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_DYNAMIC, 0);
/* All of our GPUs support ES mode. Midgard supports additionally
* QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */
/* Prepare for render! */
panfrost_batch_init(ctx);
- panfrost_invalidate_frame(ctx);
if (!(dev->quirks & IS_BIFROST)) {
for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c)
/* By default mask everything on */
ctx->sample_mask = ~0;
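+ /* Queries default to active; this was previously set in panfrost_invalidate_frame */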
+ ctx->active_queries = true;
return gallium;
}