X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fpan_context.c;h=7b0edd6853d8cfc3a62ab50d66868dfa638829a4;hp=e06d607375f5987a5203e41532031de07a8b074d;hb=80f1d611c5ddca6a719e0a470d3967a3d20ebcda;hpb=f69b6e91164fe672c1ae9e54b6f17387d81cd9e6 diff --git a/src/gallium/drivers/panfrost/pan_context.c b/src/gallium/drivers/panfrost/pan_context.c index e06d607375f..7b0edd6853d 100644 --- a/src/gallium/drivers/panfrost/pan_context.c +++ b/src/gallium/drivers/panfrost/pan_context.c @@ -53,7 +53,8 @@ #include "pan_blend_shaders.h" #include "pan_cmdstream.h" #include "pan_util.h" -#include "pandecode/decode.h" +#include "decode.h" +#include "util/pan_lower_framebuffer.h" struct midgard_tiler_descriptor panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) @@ -75,20 +76,14 @@ panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) t.polygon_list_size = panfrost_tiler_full_size( width, height, t.hierarchy_mask, hierarchy); - /* Sanity check */ - if (vertex_count) { - struct panfrost_bo *tiler_heap; - - tiler_heap = panfrost_batch_get_tiler_heap(batch); t.polygon_list = panfrost_batch_get_polygon_list(batch, header_size + t.polygon_list_size); - /* Allow the entire tiler heap */ - t.heap_start = tiler_heap->gpu; - t.heap_end = tiler_heap->gpu + tiler_heap->size; + t.heap_start = device->tiler_heap->gpu; + t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size; } else { struct panfrost_bo *tiler_dummy; @@ -138,21 +133,9 @@ panfrost_clear( * fragment jobs. */ struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx); - - panfrost_batch_add_fbo_bos(batch); panfrost_batch_clear(batch, buffers, color, depth, stencil); } -/* Reset per-frame context, called on context initialisation as well as after - * flushing a frame */ - -void -panfrost_invalidate_frame(struct panfrost_context *ctx) -{ - /* TODO: When does this need to be handled? */ - ctx->active_queries = true; -} - bool panfrost_writes_point_size(struct panfrost_context *ctx) { @@ -162,73 +145,6 @@ panfrost_writes_point_size(struct panfrost_context *ctx) return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS; } -void -panfrost_vertex_state_upd_attr_offs(struct panfrost_context *ctx, - struct mali_vertex_tiler_postfix *vertex_postfix) -{ - if (!ctx->vertex) - return; - - struct panfrost_vertex_state *so = ctx->vertex; - - /* Fixup offsets for the second pass. Recall that the hardware - * calculates attribute addresses as: - * - * addr = base + (stride * vtx) + src_offset; - * - * However, on Mali, base must be aligned to 64-bytes, so we - * instead let: - * - * base' = base & ~63 = base - (base & 63) - * - * To compensate when using base' (see emit_vertex_data), we have - * to adjust src_offset by the masked off piece: - * - * addr' = base' + (stride * vtx) + (src_offset + (base & 63)) - * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63) - * = base + (stride * vtx) + src_offset - * = addr; - * - * QED. - */ - - unsigned start = vertex_postfix->offset_start; - - for (unsigned i = 0; i < so->num_elements; ++i) { - unsigned vbi = so->pipe[i].vertex_buffer_index; - struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi]; - - /* Adjust by the masked off bits of the offset. 
Make sure we - * read src_offset from so->hw (which is not GPU visible) - * rather than target (which is) due to caching effects */ - - unsigned src_offset = so->pipe[i].src_offset; - - /* BOs aligned to 4k so guaranteed aligned to 64 */ - src_offset += (buf->buffer_offset & 63); - - /* Also, somewhat obscurely per-instance data needs to be - * offset in response to a delayed start in an indexed draw */ - - if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) - src_offset -= buf->stride * start; - - so->hw[i].src_offset = src_offset; - } -} - -/* Compute number of UBOs active (more specifically, compute the highest UBO - * number addressable -- if there are gaps, include them in the count anyway). - * We always include UBO #0 in the count, since we *need* uniforms enabled for - * sysvals. */ - -unsigned -panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage) -{ - unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1; - return 32 - __builtin_clz(mask); -} - /* The entire frame is in memory -- send it off to the kernel! */ void @@ -238,42 +154,33 @@ panfrost_flush( unsigned flags) { struct panfrost_context *ctx = pan_context(pipe); - struct util_dynarray fences; + struct panfrost_device *dev = pan_device(pipe->screen); + uint32_t syncobj = 0; - /* We must collect the fences before the flush is done, otherwise we'll - * lose track of them. - */ - if (fence) { - util_dynarray_init(&fences, NULL); - hash_table_foreach(ctx->batches, hentry) { - struct panfrost_batch *batch = hentry->data; - - panfrost_batch_fence_reference(batch->out_sync); - util_dynarray_append(&fences, - struct panfrost_batch_fence *, - batch->out_sync); - } - } + if (fence) + drmSyncobjCreate(dev->fd, 0, &syncobj); /* Submit all pending jobs */ - panfrost_flush_all_batches(ctx, false); + panfrost_flush_all_batches(ctx, syncobj); if (fence) { - struct panfrost_fence *f = panfrost_fence_create(ctx, &fences); + struct panfrost_fence *f = panfrost_fence_create(ctx, syncobj); pipe->screen->fence_reference(pipe->screen, fence, NULL); *fence = (struct pipe_fence_handle *)f; - - util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence) - panfrost_batch_fence_unreference(*fence); - - util_dynarray_fini(&fences); } - if (pan_debug & PAN_DBG_TRACE) + if (dev->debug & PAN_DBG_TRACE) pandecode_next_frame(); } -#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c; +static void +panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags) +{ + struct panfrost_context *ctx = pan_context(pipe); + panfrost_flush_all_batches(ctx, 0); +} + +#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_DRAW_MODE_##c; static int g2m_draw_mode(enum pipe_prim_type mode) @@ -304,7 +211,7 @@ panfrost_scissor_culls_everything(struct panfrost_context *ctx) /* Check if we're scissoring at all */ - if (!(ctx->rasterizer && ctx->rasterizer->base.scissor)) + if (!ctx->rasterizer->base.scissor) return false; return (ss->minx == ss->maxx) || (ss->miny == ss->maxy); @@ -372,26 +279,19 @@ panfrost_draw_vbo( assert(ctx->rasterizer != NULL); if (!(ctx->draw_modes & (1 << mode))) { - if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) { - mode = PIPE_PRIM_TRIANGLE_FAN; - } else { - if (info->count < 4) { - /* Degenerate case? */ - return; - } - - util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); - util_primconvert_draw_vbo(ctx->primconvert, info); + if (info->count < 4) { + /* Degenerate case? 
*/ return; } + + util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); + util_primconvert_draw_vbo(ctx->primconvert, info); + return; } - /* Now that we have a guaranteed terminating path, find the job. - * Assignment commented out to prevent unused warning */ + /* Now that we have a guaranteed terminating path, find the job. */ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - - panfrost_batch_add_fbo_bos(batch); panfrost_batch_set_requirements(batch); /* Take into account a negative bias */ @@ -414,9 +314,6 @@ panfrost_draw_vbo( panfrost_statistics_record(ctx, info); - /* Dispatch "compute jobs" for the vertex/tiler pair as (1, - * vertex_count, 1) */ - panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix, 1, vertex_count, info->instance_count, 1, 1, 1); @@ -428,9 +325,6 @@ panfrost_draw_vbo( ctx->instance_count, &vertex_postfix, &tiler_postfix, &primitive_size); - panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vertex_postfix); - panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); - panfrost_emit_vertex_attr_meta(batch, &vertex_postfix); panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix); panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix); @@ -439,6 +333,9 @@ panfrost_draw_vbo( panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); panfrost_emit_viewport(batch, &tiler_postfix); + vertex_postfix.shader = panfrost_emit_compute_shader_meta(batch, PIPE_SHADER_VERTEX); + tiler_postfix.shader = panfrost_emit_frag_shader_meta(batch); + panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size); /* Fire off the draw itself */ @@ -470,6 +367,9 @@ panfrost_create_rasterizer_state( so->base = *cso; + /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ + assert(cso->offset_clamp == 0.0); + return so; } @@ -485,9 +385,6 @@ panfrost_bind_rasterizer_state( if (!hwcso) return; - /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ - assert(ctx->rasterizer->base.offset_clamp == 0.0); - /* Point sprites are emulated */ struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT); @@ -509,34 +406,29 @@ panfrost_create_vertex_elements_state( memcpy(so->pipe, elements, sizeof(*elements) * num_elements); for (int i = 0; i < num_elements; ++i) { - so->hw[i].index = i; - enum pipe_format fmt = elements[i].src_format; const struct util_format_description *desc = util_format_description(fmt); - so->hw[i].unknown1 = 0x2; - + unsigned swizzle = 0; if (dev->quirks & HAS_SWIZZLES) - so->hw[i].swizzle = panfrost_translate_swizzle_4(desc->swizzle); + swizzle = panfrost_translate_swizzle_4(desc->swizzle); else - so->hw[i].swizzle = panfrost_bifrost_swizzle(desc->nr_channels); + swizzle = panfrost_bifrost_swizzle(desc->nr_channels); enum mali_format hw_format = panfrost_pipe_format_table[desc->format].hw; - so->hw[i].format = hw_format; + so->formats[i] = (hw_format << 12) | swizzle; assert(hw_format); } /* Let's also prepare vertex builtins */ - so->hw[PAN_VERTEX_ID].format = MALI_R32UI; if (dev->quirks & HAS_SWIZZLES) - so->hw[PAN_VERTEX_ID].swizzle = panfrost_get_default_swizzle(1); + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); else - so->hw[PAN_VERTEX_ID].swizzle = panfrost_bifrost_swizzle(1); + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | 
panfrost_bifrost_swizzle(1); - so->hw[PAN_INSTANCE_ID].format = MALI_R32UI; if (dev->quirks & HAS_SWIZZLES) - so->hw[PAN_INSTANCE_ID].swizzle = panfrost_get_default_swizzle(1); + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); else - so->hw[PAN_INSTANCE_ID].swizzle = panfrost_bifrost_swizzle(1); + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1); return so; } @@ -557,6 +449,7 @@ panfrost_create_shader_state( enum pipe_shader_type stage) { struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants); + struct panfrost_device *dev = pan_device(pctx->screen); so->base = *cso; /* Token deep copy to prevent memory corruption */ @@ -565,10 +458,10 @@ panfrost_create_shader_state( so->base.tokens = tgsi_dup_tokens(so->base.tokens); /* Precompile for shader-db if we need to */ - if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { + if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { struct panfrost_context *ctx = pan_context(pctx); - struct panfrost_shader_state state; + struct panfrost_shader_state state = { 0 }; uint64_t outputs_written; panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR, @@ -588,16 +481,21 @@ panfrost_delete_shader_state( struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so; if (cso->base.type == PIPE_SHADER_IR_TGSI) { - DBG("Deleting TGSI shader leaks duplicated tokens\n"); + /* TODO: leaks TGSI tokens! */ } for (unsigned i = 0; i < cso->variant_count; ++i) { struct panfrost_shader_state *shader_state = &cso->variants[i]; panfrost_bo_unreference(shader_state->bo); + + if (shader_state->upload.rsrc) + pipe_resource_reference(&shader_state->upload.rsrc, NULL); + shader_state->bo = NULL; } free(cso->variants); + free(so); } @@ -612,9 +510,9 @@ panfrost_create_sampler_state( so->base = *cso; if (device->quirks & IS_BIFROST) - panfrost_sampler_desc_init_bifrost(cso, &so->bifrost_hw); + panfrost_sampler_desc_init_bifrost(cso, (struct mali_bifrost_sampler_packed *) &so->hw); else - panfrost_sampler_desc_init(cso, &so->midgard_hw); + panfrost_sampler_desc_init(cso, &so->hw); return so; } @@ -641,28 +539,36 @@ panfrost_variant_matches( struct panfrost_shader_state *variant, enum pipe_shader_type type) { + struct panfrost_device *dev = pan_device(ctx->base.screen); struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base; - struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha; bool is_fragment = (type == PIPE_SHADER_FRAGMENT); - if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) { - /* Make sure enable state is at least the same */ - if (alpha->enabled != variant->alpha_state.enabled) { - return false; - } + if (variant->outputs_read) { + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; - /* Check that the contents of the test are the same */ - bool same_func = alpha->func == variant->alpha_state.func; - bool same_ref = alpha->ref_value == variant->alpha_state.ref_value; + unsigned i; + BITSET_FOREACH_SET(i, &variant->outputs_read, 8) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; - if (!(same_func && same_ref)) { - return false; + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; + + const struct util_format_description *desc = + util_format_description(fmt); + + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + if (variant->rt_formats[i] != fmt) + return false; } } + /* Point sprites TODO on 
bifrost, always pass */ if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable | - variant->point_sprite_mask)) { + variant->point_sprite_mask) + && !(dev->quirks & IS_BIFROST)) { /* Ensure the same varyings are turned to point sprites */ if (rasterizer->sprite_coord_enable != variant->point_sprite_mask) return false; @@ -726,6 +632,7 @@ panfrost_bind_shader_state( enum pipe_shader_type type) { struct panfrost_context *ctx = pan_context(pctx); + struct panfrost_device *dev = pan_device(ctx->base.screen); ctx->shader[type] = hwcso; if (!hwcso) return; @@ -769,9 +676,24 @@ panfrost_bind_shader_state( &variants->variants[variant]; if (type == PIPE_SHADER_FRAGMENT) { - v->alpha_state = ctx->depth_stencil->alpha; + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; + for (unsigned i = 0; i < fb->nr_cbufs; ++i) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; + + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; + + const struct util_format_description *desc = + util_format_description(fmt); - if (ctx->rasterizer) { + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + v->rt_formats[i] = fmt; + } + + /* Point sprites are TODO on Bifrost */ + if (ctx->rasterizer && !(dev->quirks & IS_BIFROST)) { v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable; v->point_sprite_upper_left = ctx->rasterizer->base.sprite_coord_mode == @@ -878,129 +800,148 @@ panfrost_set_stencil_ref( ctx->stencil_ref = *ref; } -static enum mali_texture_type -panfrost_translate_texture_type(enum pipe_texture_target t) { - switch (t) - { - case PIPE_BUFFER: - case PIPE_TEXTURE_1D: - case PIPE_TEXTURE_1D_ARRAY: - return MALI_TEX_1D; - - case PIPE_TEXTURE_2D: - case PIPE_TEXTURE_2D_ARRAY: - case PIPE_TEXTURE_RECT: - return MALI_TEX_2D; - - case PIPE_TEXTURE_3D: - return MALI_TEX_3D; - - case PIPE_TEXTURE_CUBE: - case PIPE_TEXTURE_CUBE_ARRAY: - return MALI_TEX_CUBE; +void +panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so, + struct pipe_context *pctx, + struct pipe_resource *texture) +{ + struct panfrost_device *device = pan_device(pctx->screen); + struct panfrost_resource *prsrc = (struct panfrost_resource *)texture; + enum pipe_format format = so->base.format; + assert(prsrc->bo); - default: - unreachable("Unknown target"); + /* Format to access the stencil portion of a Z32_S8 texture */ + if (format == PIPE_FORMAT_X32_S8X24_UINT) { + assert(prsrc->separate_stencil); + texture = &prsrc->separate_stencil->base; + prsrc = (struct panfrost_resource *)texture; + format = texture->format; } -} -static struct pipe_sampler_view * -panfrost_create_sampler_view( - struct pipe_context *pctx, - struct pipe_resource *texture, - const struct pipe_sampler_view *template) -{ - struct panfrost_device *device = pan_device(pctx->screen); - struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); + const struct util_format_description *desc = util_format_description(format); - pipe_reference(NULL, &texture->reference); + bool fake_rgtc = !panfrost_supports_compressed_format(device, MALI_BC4_UNORM); - struct panfrost_resource *prsrc = (struct panfrost_resource *) texture; - assert(prsrc->bo); + if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC && fake_rgtc) { + if (desc->is_snorm) + format = PIPE_FORMAT_R8G8B8A8_SNORM; + else + format = PIPE_FORMAT_R8G8B8A8_UNORM; + desc = util_format_description(format); + } - so->base = *template; - so->base.texture = texture; - so->base.reference.count = 1; - so->base.context = 
pctx; + so->texture_bo = prsrc->bo->gpu; + so->modifier = prsrc->modifier; unsigned char user_swizzle[4] = { - template->swizzle_r, - template->swizzle_g, - template->swizzle_b, - template->swizzle_a + so->base.swizzle_r, + so->base.swizzle_g, + so->base.swizzle_b, + so->base.swizzle_a }; /* In the hardware, array_size refers specifically to array textures, * whereas in Gallium, it also covers cubemaps */ unsigned array_size = texture->array_size; + unsigned depth = texture->depth0; - if (template->target == PIPE_TEXTURE_CUBE) { + if (so->base.target == PIPE_TEXTURE_CUBE) { /* TODO: Cubemap arrays */ assert(array_size == 6); array_size /= 6; } - enum mali_texture_type type = - panfrost_translate_texture_type(template->target); + /* MSAA only supported for 2D textures (and 2D texture arrays via an + * extension currently unimplemented */ + + if (so->base.target == PIPE_TEXTURE_2D) { + assert(depth == 1); + depth = texture->nr_samples; + } else { + /* MSAA only supported for 2D textures */ + assert(texture->nr_samples <= 1); + } + + enum mali_texture_dimension type = + panfrost_translate_texture_dimension(so->base.target); if (device->quirks & IS_BIFROST) { - const struct util_format_description *desc = - util_format_description(template->format); unsigned char composed_swizzle[4]; util_format_compose_swizzles(desc->swizzle, user_swizzle, composed_swizzle); unsigned size = panfrost_estimate_texture_payload_size( - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, - type, prsrc->layout); + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); - so->bifrost_bo = pan_bo_create(device, size, 0); + so->bo = panfrost_bo_create(device, size, 0); - so->bifrost_descriptor = rzalloc(pctx, struct bifrost_texture_descriptor); panfrost_new_texture_bifrost( - so->bifrost_descriptor, + &so->bifrost_descriptor, texture->width0, texture->height0, - texture->depth0, array_size, - template->format, - type, prsrc->layout, - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, prsrc->cubemap_stride, panfrost_translate_swizzle_4(composed_swizzle), prsrc->bo->gpu, prsrc->slices, - so->bifrost_bo); + so->bo); } else { unsigned size = panfrost_estimate_texture_payload_size( - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, - type, prsrc->layout); - size += sizeof(struct mali_texture_descriptor); + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); + size += MALI_MIDGARD_TEXTURE_LENGTH; - so->midgard_bo = pan_bo_create(device, size, 0); + so->bo = panfrost_bo_create(device, size, 0); panfrost_new_texture( - so->midgard_bo->cpu, + so->bo->cpu, texture->width0, texture->height0, - texture->depth0, array_size, - template->format, - type, prsrc->layout, - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + 
so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, prsrc->cubemap_stride, panfrost_translate_swizzle_4(user_swizzle), prsrc->bo->gpu, prsrc->slices); } +} + +static struct pipe_sampler_view * +panfrost_create_sampler_view( + struct pipe_context *pctx, + struct pipe_resource *texture, + const struct pipe_sampler_view *template) +{ + struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); + + pipe_reference(NULL, &texture->reference); + + so->base = *template; + so->base.texture = texture; + so->base.reference.count = 1; + so->base.context = pctx; + + panfrost_create_sampler_view_bo(so, pctx, texture); return (struct pipe_sampler_view *) so; } @@ -1040,10 +981,7 @@ panfrost_sampler_view_destroy( struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview; pipe_resource_reference(&pview->texture, NULL); - panfrost_bo_unreference(view->midgard_bo); - panfrost_bo_unreference(view->bifrost_bo); - if (view->bifrost_descriptor) - ralloc_free(view->bifrost_descriptor); + panfrost_bo_unreference(view->bo); ralloc_free(view); } @@ -1061,49 +999,75 @@ panfrost_set_shader_buffers( buffers, start, count); } -/* Hints that a framebuffer should use AFBC where possible */ - static void -panfrost_hint_afbc( - struct panfrost_device *device, - const struct pipe_framebuffer_state *fb) +panfrost_set_framebuffer_state(struct pipe_context *pctx, + const struct pipe_framebuffer_state *fb) { - /* AFBC implemenation incomplete; hide it */ - if (!(pan_debug & PAN_DBG_AFBC)) return; + struct panfrost_context *ctx = pan_context(pctx); - /* Hint AFBC to the resources bound to each color buffer */ + util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb); + ctx->batch = NULL; - for (unsigned i = 0; i < fb->nr_cbufs; ++i) { - struct pipe_surface *surf = fb->cbufs[i]; - struct panfrost_resource *rsrc = pan_resource(surf->texture); - panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1); - } + /* We may need to generate a new variant if the fragment shader is + * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */ + struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT]; - /* Also hint it to the depth buffer */ + if (fs && fs->variant_count && fs->variants[fs->active_variant].outputs_read) + ctx->base.bind_fs_state(&ctx->base, fs); +} - if (fb->zsbuf) { - struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture); - panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1); +static inline unsigned +pan_pipe_to_stencil_op(enum pipe_stencil_op in) +{ + switch (in) { + case PIPE_STENCIL_OP_KEEP: return MALI_STENCIL_OP_KEEP; + case PIPE_STENCIL_OP_ZERO: return MALI_STENCIL_OP_ZERO; + case PIPE_STENCIL_OP_REPLACE: return MALI_STENCIL_OP_REPLACE; + case PIPE_STENCIL_OP_INCR: return MALI_STENCIL_OP_INCR_SAT; + case PIPE_STENCIL_OP_DECR: return MALI_STENCIL_OP_DECR_SAT; + case PIPE_STENCIL_OP_INCR_WRAP: return MALI_STENCIL_OP_INCR_WRAP; + case PIPE_STENCIL_OP_DECR_WRAP: return MALI_STENCIL_OP_DECR_WRAP; + case PIPE_STENCIL_OP_INVERT: return MALI_STENCIL_OP_INVERT; + default: unreachable("Invalid stencil op"); } } -static void -panfrost_set_framebuffer_state(struct pipe_context *pctx, - const struct pipe_framebuffer_state *fb) +static inline void +pan_pipe_to_stencil(const struct pipe_stencil_state *in, void *out) { - struct panfrost_context *ctx = pan_context(pctx); - - panfrost_hint_afbc(pan_device(pctx->screen), fb); - util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb); - ctx->batch = NULL; - 
panfrost_invalidate_frame(ctx); + pan_pack(out, STENCIL, cfg) { + cfg.mask = in->valuemask; + cfg.compare_function = panfrost_translate_compare_func(in->func); + cfg.stencil_fail = pan_pipe_to_stencil_op(in->fail_op); + cfg.depth_fail = pan_pipe_to_stencil_op(in->zfail_op); + cfg.depth_pass = pan_pipe_to_stencil_op(in->zpass_op); + } } static void * panfrost_create_depth_stencil_state(struct pipe_context *pipe, - const struct pipe_depth_stencil_alpha_state *depth_stencil) + const struct pipe_depth_stencil_alpha_state *zsa) { - return mem_dup(depth_stencil, sizeof(*depth_stencil)); + struct panfrost_zsa_state *so = CALLOC_STRUCT(panfrost_zsa_state); + so->base = *zsa; + + pan_pipe_to_stencil(&zsa->stencil[0], &so->stencil_front); + pan_pipe_to_stencil(&zsa->stencil[1], &so->stencil_back); + + so->stencil_mask_front = zsa->stencil[0].writemask; + + if (zsa->stencil[1].enabled) + so->stencil_mask_back = zsa->stencil[1].writemask; + else + so->stencil_mask_back = so->stencil_mask_front; + + /* Alpha lowered by frontend */ + assert(!zsa->alpha.enabled); + + /* TODO: Bounds test should be easy */ + assert(!zsa->depth.bounds_test); + + return so; } static void @@ -1111,22 +1075,8 @@ panfrost_bind_depth_stencil_state(struct pipe_context *pipe, void *cso) { struct panfrost_context *ctx = pan_context(pipe); - struct pipe_depth_stencil_alpha_state *depth_stencil = cso; - ctx->depth_stencil = depth_stencil; - - if (!depth_stencil) - return; - - /* Alpha does not exist in the hardware (it's not in ES3), so it's - * emulated in the fragment shader */ - - if (depth_stencil->alpha.enabled) { - /* We need to trigger a new shader (maybe) */ - ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]); - } - - /* Bounds test not implemented */ - assert(!depth_stencil->depth.bounds_test); + struct panfrost_zsa_state *zsa = cso; + ctx->depth_stencil = zsa; } static void @@ -1139,8 +1089,19 @@ static void panfrost_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask) { + struct panfrost_context *ctx = pan_context(pipe); + ctx->sample_mask = sample_mask; +} + +static void +panfrost_set_min_samples(struct pipe_context *pipe, + unsigned min_samples) +{ + struct panfrost_context *ctx = pan_context(pipe); + ctx->min_samples = min_samples; } + static void panfrost_set_clip_state(struct pipe_context *pipe, const struct pipe_clip_state *clip) @@ -1204,6 +1165,7 @@ panfrost_destroy(struct pipe_context *pipe) util_unreference_framebuffer_state(&panfrost->pipe_framebuffer); u_upload_destroy(pipe->stream_uploader); + u_upload_destroy(panfrost->state_uploader); ralloc_free(pipe); } @@ -1246,7 +1208,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: /* Allocate a bo for the query results to be stored */ if (!query->bo) { - query->bo = pan_bo_create( + query->bo = panfrost_bo_create( pan_device(ctx->base.screen), sizeof(unsigned), 0); } @@ -1267,7 +1229,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) break; default: - DBG("Skipping query %u\n", query->type); + /* TODO: timestamp queries, etc? 
*/ break; } @@ -1311,8 +1273,8 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_OCCLUSION_COUNTER: case PIPE_QUERY_OCCLUSION_PREDICATE: case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: - /* Flush first */ - panfrost_flush_all_batches(ctx, true); + panfrost_flush_batches_accessing_bo(ctx, query->bo, false); + panfrost_bo_wait(query->bo, INT64_MAX, false); /* Read back the query results */ unsigned *result = (unsigned *) query->bo->cpu; @@ -1328,12 +1290,12 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_PRIMITIVES_GENERATED: case PIPE_QUERY_PRIMITIVES_EMITTED: - panfrost_flush_all_batches(ctx, true); + panfrost_flush_all_batches(ctx, 0); vresult->u64 = query->end - query->start; break; default: - DBG("Skipped query get %u\n", query->type); + /* TODO: more queries */ break; } @@ -1400,6 +1362,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) { struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context); struct pipe_context *gallium = (struct pipe_context *) ctx; + struct panfrost_device *dev = pan_device(screen); gallium->screen = screen; @@ -1410,6 +1373,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->flush = panfrost_flush; gallium->clear = panfrost_clear; gallium->draw_vbo = panfrost_draw_vbo; + gallium->texture_barrier = panfrost_texture_barrier; gallium->set_vertex_buffers = panfrost_set_vertex_buffers; gallium->set_constant_buffer = panfrost_set_constant_buffer; @@ -1446,6 +1410,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state; gallium->set_sample_mask = panfrost_set_sample_mask; + gallium->set_min_samples = panfrost_set_min_samples; gallium->set_clip_state = panfrost_set_clip_state; gallium->set_viewport_states = panfrost_set_viewport_states; @@ -1469,10 +1434,19 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->stream_uploader = u_upload_create_default(gallium); gallium->const_uploader = gallium->stream_uploader; - assert(gallium->stream_uploader); - /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */ - ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1; + ctx->state_uploader = u_upload_create(gallium, 4096, + PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_DYNAMIC, 0); + + /* All of our GPUs support ES mode. Midgard supports additionally + * QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */ + + ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1; + + if (!(dev->quirks & IS_BIFROST)) { + ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP); + ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON); + } ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes); @@ -1485,7 +1459,15 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) /* Prepare for render! */ panfrost_batch_init(ctx); - panfrost_invalidate_frame(ctx); + + if (!(dev->quirks & IS_BIFROST)) { + for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c) + ctx->blit_blend.rt[c].shaders = _mesa_hash_table_u64_create(ctx); + } + + /* By default mask everything on */ + ctx->sample_mask = ~0; + ctx->active_queries = true; return gallium; }
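
The comment removed from panfrost_vertex_state_upd_attr_offs above spells out why the attribute base can be masked down to a 64-byte boundary (as the hardware requires) provided the masked-off remainder is folded back into src_offset. A minimal standalone sketch of that identity (plain C for illustration, not driver code; the helper names and the test harness are invented here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* addr = base + (stride * vtx) + src_offset */
static uint64_t
addr_exact(uint64_t base, uint32_t stride, uint32_t vtx, uint32_t src_offset)
{
        return base + (uint64_t)stride * vtx + src_offset;
}

/* base' = base & ~63 and src_offset' = src_offset + (base & 63):
 * the masked-off low bits move from the base into the offset, so the
 * computed address is unchanged. */
static uint64_t
addr_aligned(uint64_t base, uint32_t stride, uint32_t vtx, uint32_t src_offset)
{
        uint64_t base_aligned = base & ~63ull;
        uint32_t fixed_offset = src_offset + (base & 63);
        return base_aligned + (uint64_t)stride * vtx + fixed_offset;
}

int
main(void)
{
        /* Arbitrary sample values; the identity holds for any inputs. */
        uint64_t bases[] = { 0x1000, 0x1234, 0xdeadbee7, 0x7fffffc1 };

        for (unsigned i = 0; i < 4; ++i)
                for (uint32_t vtx = 0; vtx < 100; ++vtx)
                        assert(addr_exact(bases[i], 36, vtx, 12) ==
                               addr_aligned(bases[i], 36, vtx, 12));

        printf("offset fixup identity holds\n");
        return 0;
}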
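
Likewise, the removed panfrost_ubo_count helper reports the highest addressable UBO index from the enabled-buffer mask, forcing bit 0 on so uniforms stay enabled for sysvals and counting any gaps below the highest bound slot. A small sketch of the same count, assuming a GCC/Clang-style __builtin_clz as the original uses; the test values are illustrative only:

#include <assert.h>

static unsigned
ubo_count(unsigned enabled_mask)
{
        /* Force UBO #0 on, then take the position of the highest set bit. */
        unsigned mask = enabled_mask | 1;
        return 32 - __builtin_clz(mask);
}

int
main(void)
{
        assert(ubo_count(0x00) == 1); /* nothing bound: still report UBO #0 */
        assert(ubo_count(0x01) == 1);
        assert(ubo_count(0x05) == 3); /* UBOs 0 and 2 bound: the gap counts too */
        assert(ubo_count(0x80) == 8);
        return 0;
}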