X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fpan_context.c;h=58f08fae0eaf18821ad55e360207cf24628cd5c9;hb=e5c77cbead98cfed0cd84723c7cac8796a2bfa66;hp=c8af10729c910faf6699997cfb4d642858a92d51;hpb=c4400b05be1aa68168e924066b9d05401745a879;p=mesa.git diff --git a/src/gallium/drivers/panfrost/pan_context.c b/src/gallium/drivers/panfrost/pan_context.c index c8af10729c9..58f08fae0ea 100644 --- a/src/gallium/drivers/panfrost/pan_context.c +++ b/src/gallium/drivers/panfrost/pan_context.c @@ -53,7 +53,8 @@ #include "pan_blend_shaders.h" #include "pan_cmdstream.h" #include "pan_util.h" -#include "pandecode/decode.h" +#include "decode.h" +#include "util/pan_lower_framebuffer.h" struct midgard_tiler_descriptor panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) @@ -75,20 +76,14 @@ panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) t.polygon_list_size = panfrost_tiler_full_size( width, height, t.hierarchy_mask, hierarchy); - /* Sanity check */ - if (vertex_count) { - struct panfrost_bo *tiler_heap; - - tiler_heap = panfrost_batch_get_tiler_heap(batch); t.polygon_list = panfrost_batch_get_polygon_list(batch, header_size + t.polygon_list_size); - /* Allow the entire tiler heap */ - t.heap_start = tiler_heap->gpu; - t.heap_end = tiler_heap->gpu + tiler_heap->size; + t.heap_start = device->tiler_heap->gpu; + t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size; } else { struct panfrost_bo *tiler_dummy; @@ -138,21 +133,9 @@ panfrost_clear( * fragment jobs. */ struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx); - - panfrost_batch_add_fbo_bos(batch); panfrost_batch_clear(batch, buffers, color, depth, stencil); } -/* Reset per-frame context, called on context initialisation as well as after - * flushing a frame */ - -void -panfrost_invalidate_frame(struct panfrost_context *ctx) -{ - /* TODO: When does this need to be handled? */ - ctx->active_queries = true; -} - bool panfrost_writes_point_size(struct panfrost_context *ctx) { @@ -162,73 +145,6 @@ panfrost_writes_point_size(struct panfrost_context *ctx) return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS; } -void -panfrost_vertex_state_upd_attr_offs(struct panfrost_context *ctx, - struct mali_vertex_tiler_postfix *vertex_postfix) -{ - if (!ctx->vertex) - return; - - struct panfrost_vertex_state *so = ctx->vertex; - - /* Fixup offsets for the second pass. Recall that the hardware - * calculates attribute addresses as: - * - * addr = base + (stride * vtx) + src_offset; - * - * However, on Mali, base must be aligned to 64-bytes, so we - * instead let: - * - * base' = base & ~63 = base - (base & 63) - * - * To compensate when using base' (see emit_vertex_data), we have - * to adjust src_offset by the masked off piece: - * - * addr' = base' + (stride * vtx) + (src_offset + (base & 63)) - * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63) - * = base + (stride * vtx) + src_offset - * = addr; - * - * QED. - */ - - unsigned start = vertex_postfix->offset_start; - - for (unsigned i = 0; i < so->num_elements; ++i) { - unsigned vbi = so->pipe[i].vertex_buffer_index; - struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi]; - - /* Adjust by the masked off bits of the offset. 
Make sure we - * read src_offset from so->hw (which is not GPU visible) - * rather than target (which is) due to caching effects */ - - unsigned src_offset = so->pipe[i].src_offset; - - /* BOs aligned to 4k so guaranteed aligned to 64 */ - src_offset += (buf->buffer_offset & 63); - - /* Also, somewhat obscurely per-instance data needs to be - * offset in response to a delayed start in an indexed draw */ - - if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) - src_offset -= buf->stride * start; - - so->hw[i].src_offset = src_offset; - } -} - -/* Compute number of UBOs active (more specifically, compute the highest UBO - * number addressable -- if there are gaps, include them in the count anyway). - * We always include UBO #0 in the count, since we *need* uniforms enabled for - * sysvals. */ - -unsigned -panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage) -{ - unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1; - return 32 - __builtin_clz(mask); -} - /* The entire frame is in memory -- send it off to the kernel! */ void @@ -238,45 +154,36 @@ panfrost_flush( unsigned flags) { struct panfrost_context *ctx = pan_context(pipe); - struct util_dynarray fences; + struct panfrost_device *dev = pan_device(pipe->screen); + uint32_t syncobj = 0; - /* We must collect the fences before the flush is done, otherwise we'll - * lose track of them. - */ - if (fence) { - util_dynarray_init(&fences, NULL); - hash_table_foreach(ctx->batches, hentry) { - struct panfrost_batch *batch = hentry->data; - - panfrost_batch_fence_reference(batch->out_sync); - util_dynarray_append(&fences, - struct panfrost_batch_fence *, - batch->out_sync); - } - } + if (fence) + drmSyncobjCreate(dev->fd, 0, &syncobj); /* Submit all pending jobs */ - panfrost_flush_all_batches(ctx, false); + panfrost_flush_all_batches(ctx, syncobj); if (fence) { - struct panfrost_fence *f = panfrost_fence_create(ctx, &fences); + struct panfrost_fence *f = panfrost_fence_create(ctx, syncobj); pipe->screen->fence_reference(pipe->screen, fence, NULL); *fence = (struct pipe_fence_handle *)f; - - util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence) - panfrost_batch_fence_unreference(*fence); - - util_dynarray_fini(&fences); } - if (pan_debug & PAN_DBG_TRACE) + if (dev->debug & PAN_DBG_TRACE) pandecode_next_frame(); } -#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c; +static void +panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags) +{ + struct panfrost_context *ctx = pan_context(pipe); + panfrost_flush_all_batches(ctx, 0); +} + +#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_DRAW_MODE_##c; static int -g2m_draw_mode(enum pipe_prim_type mode) +pan_draw_mode(enum pipe_prim_type mode) { switch (mode) { DEFINE_CASE(POINTS); @@ -304,7 +211,7 @@ panfrost_scissor_culls_everything(struct panfrost_context *ctx) /* Check if we're scissoring at all */ - if (!(ctx->rasterizer && ctx->rasterizer->base.scissor)) + if (!ctx->rasterizer->base.scissor) return false; return (ss->minx == ss->maxx) || (ss->miny == ss->maxy); @@ -348,6 +255,7 @@ panfrost_draw_vbo( const struct pipe_draw_info *info) { struct panfrost_context *ctx = pan_context(pipe); + struct panfrost_device *device = pan_device(ctx->base.screen); /* First of all, check the scissor to see if anything is drawn at all. 
* If it's not, we drop the draw (mostly a conformance issue; @@ -372,26 +280,19 @@ panfrost_draw_vbo( assert(ctx->rasterizer != NULL); if (!(ctx->draw_modes & (1 << mode))) { - if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) { - mode = PIPE_PRIM_TRIANGLE_FAN; - } else { - if (info->count < 4) { - /* Degenerate case? */ - return; - } - - util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); - util_primconvert_draw_vbo(ctx->primconvert, info); + if (info->count < 4) { + /* Degenerate case? */ return; } + + util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); + util_primconvert_draw_vbo(ctx->primconvert, info); + return; } - /* Now that we have a guaranteed terminating path, find the job. - * Assignment commented out to prevent unused warning */ + /* Now that we have a guaranteed terminating path, find the job. */ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - - panfrost_batch_add_fbo_bos(batch); panfrost_batch_set_requirements(batch); /* Take into account a negative bias */ @@ -399,46 +300,125 @@ panfrost_draw_vbo( ctx->instance_count = info->instance_count; ctx->active_prim = info->mode; - struct mali_vertex_tiler_prefix vertex_prefix, tiler_prefix; - struct mali_vertex_tiler_postfix vertex_postfix, tiler_postfix; + struct mali_vertex_tiler_prefix vertex_prefix = { 0 }, tiler_prefix = { 0 }; + struct mali_vertex_tiler_postfix vertex_postfix = { 0 }, tiler_postfix = { 0 }; union midgard_primitive_size primitive_size; unsigned vertex_count; - panfrost_vt_init(ctx, PIPE_SHADER_VERTEX, &vertex_prefix, &vertex_postfix); - panfrost_vt_init(ctx, PIPE_SHADER_FRAGMENT, &tiler_prefix, &tiler_postfix); + if (device->quirks & IS_BIFROST) { + vertex_postfix.gl_enables = 0x2; + tiler_postfix.gl_enables = 0x3; + vertex_postfix.shared_memory = panfrost_vt_emit_shared_memory(batch); + } else { + vertex_postfix.gl_enables = 0x6; + tiler_postfix.gl_enables = 0x7; + vertex_postfix.shared_memory = panfrost_batch_reserve_framebuffer(batch); + } - panfrost_vt_set_draw_info(ctx, info, g2m_draw_mode(mode), - &vertex_postfix, &tiler_prefix, - &tiler_postfix, &vertex_count, - &ctx->padded_count); + tiler_postfix.shared_memory = vertex_postfix.shared_memory; - panfrost_statistics_record(ctx, info); + if (ctx->occlusion_query) { + tiler_postfix.gl_enables |= MALI_OCCLUSION_QUERY; + tiler_postfix.occlusion_counter = ctx->occlusion_query->bo->gpu; + panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo, + PAN_BO_ACCESS_SHARED | + PAN_BO_ACCESS_RW | + PAN_BO_ACCESS_FRAGMENT); + } + + struct pipe_rasterizer_state *rast = &ctx->rasterizer->base; + SET_BIT(tiler_postfix.gl_enables, MALI_FRONT_CCW_TOP, + rast->front_ccw); + SET_BIT(tiler_postfix.gl_enables, MALI_CULL_FACE_FRONT, + (rast->cull_face & PIPE_FACE_FRONT)); + SET_BIT(tiler_postfix.gl_enables, MALI_CULL_FACE_BACK, + (rast->cull_face & PIPE_FACE_BACK)); + SET_BIT(tiler_prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST, + rast->flatshade_first); + + tiler_prefix.draw_mode = pan_draw_mode(mode); - /* Dispatch "compute jobs" for the vertex/tiler pair as (1, - * vertex_count, 1) */ + unsigned draw_flags = 0x3000; + + if (panfrost_writes_point_size(ctx)) + draw_flags |= MALI_DRAW_VARYING_SIZE; + + if (info->primitive_restart) + draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX; + + if (info->index_size) { + unsigned min_index = 0, max_index = 0; + + tiler_prefix.indices = panfrost_get_index_buffer_bounded(ctx, + info, + &min_index, + &max_index); + + /* 
Use the corresponding values */ + vertex_count = max_index - min_index + 1; + tiler_postfix.offset_start = vertex_postfix.offset_start = min_index + info->index_bias; + tiler_prefix.offset_bias_correction = -min_index; + tiler_prefix.index_count = MALI_POSITIVE(info->count); + draw_flags |= panfrost_translate_index_size(info->index_size); + } else { + vertex_count = ctx->vertex_count; + tiler_postfix.offset_start = vertex_postfix.offset_start = info->start; + tiler_prefix.index_count = MALI_POSITIVE(ctx->vertex_count); + } + + tiler_prefix.unknown_draw = draw_flags; + ctx->offset_start = vertex_postfix.offset_start; + + /* Encode the padded vertex count */ + + if (info->instance_count > 1) { + ctx->padded_count = panfrost_padded_vertex_count(vertex_count); + + unsigned shift = __builtin_ctz(ctx->padded_count); + unsigned k = ctx->padded_count >> (shift + 1); + + tiler_postfix.instance_shift = vertex_postfix.instance_shift = shift; + tiler_postfix.instance_odd = vertex_postfix.instance_odd = k; + } else { + ctx->padded_count = vertex_count; + } + + panfrost_statistics_record(ctx, info); panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix, 1, vertex_count, info->instance_count, 1, 1, 1); /* Emit all sort of descriptors. */ - panfrost_emit_vertex_data(batch, &vertex_postfix); + mali_ptr push_vert = 0, push_frag = 0, attribs = 0; + mali_ptr varyings = 0, vs_vary = 0, fs_vary = 0, pos = 0, psiz = 0; + + vertex_postfix.attribute_meta = panfrost_emit_vertex_data(batch, &attribs); + vertex_postfix.attributes = attribs; panfrost_emit_varying_descriptor(batch, ctx->padded_count * ctx->instance_count, - &vertex_postfix, &tiler_postfix, - &primitive_size); - panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, &vertex_postfix); - panfrost_emit_shader_meta(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); - panfrost_emit_vertex_attr_meta(batch, &vertex_postfix); - panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix); - panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); - panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX, &vertex_postfix); - panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); - panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &vertex_postfix); - panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &tiler_postfix); - panfrost_emit_viewport(batch, &tiler_postfix); - + &vs_vary, &fs_vary, &varyings, + &pos, &psiz); + vertex_postfix.varyings = varyings; + tiler_postfix.varyings = varyings; + vertex_postfix.varying_meta = vs_vary; + tiler_postfix.varying_meta = fs_vary; + tiler_postfix.position_varying = pos; + vertex_postfix.sampler_descriptor = panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX); + tiler_postfix.sampler_descriptor = panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT); + vertex_postfix.textures = panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX); + tiler_postfix.textures = panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT); + vertex_postfix.uniform_buffers = panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &push_vert); + tiler_postfix.uniform_buffers = panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &push_frag); + vertex_postfix.uniforms = push_vert; + tiler_postfix.uniforms = push_frag; + tiler_postfix.viewport = panfrost_emit_viewport(batch); + + vertex_postfix.shader = panfrost_emit_compute_shader_meta(batch, PIPE_SHADER_VERTEX); + tiler_postfix.shader = panfrost_emit_frag_shader_meta(batch); + + primitive_size.pointer = 
psiz; panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size); /* Fire off the draw itself */ @@ -470,6 +450,9 @@ panfrost_create_rasterizer_state( so->base = *cso; + /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ + assert(cso->offset_clamp == 0.0); + return so; } @@ -485,9 +468,6 @@ panfrost_bind_rasterizer_state( if (!hwcso) return; - /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ - assert(ctx->rasterizer->base.offset_clamp == 0.0); - /* Point sprites are emulated */ struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT); @@ -509,32 +489,29 @@ panfrost_create_vertex_elements_state( memcpy(so->pipe, elements, sizeof(*elements) * num_elements); for (int i = 0; i < num_elements; ++i) { - so->hw[i].index = i; - enum pipe_format fmt = elements[i].src_format; const struct util_format_description *desc = util_format_description(fmt); - so->hw[i].unknown1 = 0x2; - + unsigned swizzle = 0; if (dev->quirks & HAS_SWIZZLES) - so->hw[i].swizzle = panfrost_translate_swizzle_4(desc->swizzle); + swizzle = panfrost_translate_swizzle_4(desc->swizzle); else - so->hw[i].swizzle = panfrost_bifrost_swizzle(desc->nr_channels); + swizzle = panfrost_bifrost_swizzle(desc->nr_channels); - so->hw[i].format = panfrost_find_format(desc); + enum mali_format hw_format = panfrost_pipe_format_table[desc->format].hw; + so->formats[i] = (hw_format << 12) | swizzle; + assert(hw_format); } /* Let's also prepare vertex builtins */ - so->hw[PAN_VERTEX_ID].format = MALI_R32UI; if (dev->quirks & HAS_SWIZZLES) - so->hw[PAN_VERTEX_ID].swizzle = panfrost_get_default_swizzle(1); + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); else - so->hw[PAN_VERTEX_ID].swizzle = panfrost_bifrost_swizzle(1); + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1); - so->hw[PAN_INSTANCE_ID].format = MALI_R32UI; if (dev->quirks & HAS_SWIZZLES) - so->hw[PAN_INSTANCE_ID].swizzle = panfrost_get_default_swizzle(1); + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); else - so->hw[PAN_INSTANCE_ID].swizzle = panfrost_bifrost_swizzle(1); + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1); return so; } @@ -555,6 +532,7 @@ panfrost_create_shader_state( enum pipe_shader_type stage) { struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants); + struct panfrost_device *dev = pan_device(pctx->screen); so->base = *cso; /* Token deep copy to prevent memory corruption */ @@ -563,10 +541,10 @@ panfrost_create_shader_state( so->base.tokens = tgsi_dup_tokens(so->base.tokens); /* Precompile for shader-db if we need to */ - if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { + if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { struct panfrost_context *ctx = pan_context(pctx); - struct panfrost_shader_state state; + struct panfrost_shader_state state = { 0 }; uint64_t outputs_written; panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR, @@ -586,16 +564,21 @@ panfrost_delete_shader_state( struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so; if (cso->base.type == PIPE_SHADER_IR_TGSI) { - DBG("Deleting TGSI shader leaks duplicated tokens\n"); + /* TODO: leaks TGSI tokens! 
*/ } for (unsigned i = 0; i < cso->variant_count; ++i) { struct panfrost_shader_state *shader_state = &cso->variants[i]; panfrost_bo_unreference(shader_state->bo); + + if (shader_state->upload.rsrc) + pipe_resource_reference(&shader_state->upload.rsrc, NULL); + shader_state->bo = NULL; } free(cso->variants); + free(so); } @@ -610,9 +593,9 @@ panfrost_create_sampler_state( so->base = *cso; if (device->quirks & IS_BIFROST) - panfrost_sampler_desc_init_bifrost(cso, &so->bifrost_hw); + panfrost_sampler_desc_init_bifrost(cso, (struct mali_bifrost_sampler_packed *) &so->hw); else - panfrost_sampler_desc_init(cso, &so->midgard_hw); + panfrost_sampler_desc_init(cso, &so->hw); return so; } @@ -639,28 +622,36 @@ panfrost_variant_matches( struct panfrost_shader_state *variant, enum pipe_shader_type type) { + struct panfrost_device *dev = pan_device(ctx->base.screen); struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base; - struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha; bool is_fragment = (type == PIPE_SHADER_FRAGMENT); - if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) { - /* Make sure enable state is at least the same */ - if (alpha->enabled != variant->alpha_state.enabled) { - return false; - } + if (variant->outputs_read) { + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; - /* Check that the contents of the test are the same */ - bool same_func = alpha->func == variant->alpha_state.func; - bool same_ref = alpha->ref_value == variant->alpha_state.ref_value; + unsigned i; + BITSET_FOREACH_SET(i, &variant->outputs_read, 8) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; - if (!(same_func && same_ref)) { - return false; + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; + + const struct util_format_description *desc = + util_format_description(fmt); + + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + if (variant->rt_formats[i] != fmt) + return false; } } + /* Point sprites TODO on bifrost, always pass */ if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable | - variant->point_sprite_mask)) { + variant->point_sprite_mask) + && !(dev->quirks & IS_BIFROST)) { /* Ensure the same varyings are turned to point sprites */ if (rasterizer->sprite_coord_enable != variant->point_sprite_mask) return false; @@ -724,6 +715,7 @@ panfrost_bind_shader_state( enum pipe_shader_type type) { struct panfrost_context *ctx = pan_context(pctx); + struct panfrost_device *dev = pan_device(ctx->base.screen); ctx->shader[type] = hwcso; if (!hwcso) return; @@ -767,9 +759,24 @@ panfrost_bind_shader_state( &variants->variants[variant]; if (type == PIPE_SHADER_FRAGMENT) { - v->alpha_state = ctx->depth_stencil->alpha; + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; + for (unsigned i = 0; i < fb->nr_cbufs; ++i) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; + + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; - if (ctx->rasterizer) { + const struct util_format_description *desc = + util_format_description(fmt); + + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + v->rt_formats[i] = fmt; + } + + /* Point sprites are TODO on Bifrost */ + if (ctx->rasterizer && !(dev->quirks & IS_BIFROST)) { v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable; v->point_sprite_upper_left = ctx->rasterizer->base.sprite_coord_mode == @@ -876,129 +883,148 @@ panfrost_set_stencil_ref( ctx->stencil_ref = *ref; } 
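/*
 * Illustrative sketch (not part of this patch): the render-target keying
 * above is duplicated between panfrost_variant_matches() and
 * panfrost_bind_shader_state() -- both derive a per-RT pipe_format for the
 * fragment variant key and collapse anything the hardware can load natively
 * to PIPE_FORMAT_NONE so those framebuffers share one variant. A
 * hypothetical helper capturing that shared rule would look roughly like:
 *
 *     static enum pipe_format
 *     pan_fs_variant_rt_format(const struct pipe_framebuffer_state *fb,
 *                              unsigned rt, unsigned quirks)
 *     {
 *             enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;
 *
 *             if (fb->nr_cbufs > rt && fb->cbufs[rt])
 *                     fmt = fb->cbufs[rt]->format;
 *
 *             if (pan_format_class_load(util_format_description(fmt),
 *                                       quirks) == PAN_FORMAT_NATIVE)
 *                     fmt = PIPE_FORMAT_NONE;
 *
 *             return fmt;
 *     }
 */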
-static enum mali_texture_type -panfrost_translate_texture_type(enum pipe_texture_target t) { - switch (t) - { - case PIPE_BUFFER: - case PIPE_TEXTURE_1D: - case PIPE_TEXTURE_1D_ARRAY: - return MALI_TEX_1D; - - case PIPE_TEXTURE_2D: - case PIPE_TEXTURE_2D_ARRAY: - case PIPE_TEXTURE_RECT: - return MALI_TEX_2D; - - case PIPE_TEXTURE_3D: - return MALI_TEX_3D; - - case PIPE_TEXTURE_CUBE: - case PIPE_TEXTURE_CUBE_ARRAY: - return MALI_TEX_CUBE; +void +panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so, + struct pipe_context *pctx, + struct pipe_resource *texture) +{ + struct panfrost_device *device = pan_device(pctx->screen); + struct panfrost_resource *prsrc = (struct panfrost_resource *)texture; + enum pipe_format format = so->base.format; + assert(prsrc->bo); - default: - unreachable("Unknown target"); + /* Format to access the stencil portion of a Z32_S8 texture */ + if (format == PIPE_FORMAT_X32_S8X24_UINT) { + assert(prsrc->separate_stencil); + texture = &prsrc->separate_stencil->base; + prsrc = (struct panfrost_resource *)texture; + format = texture->format; } -} -static struct pipe_sampler_view * -panfrost_create_sampler_view( - struct pipe_context *pctx, - struct pipe_resource *texture, - const struct pipe_sampler_view *template) -{ - struct panfrost_device *device = pan_device(pctx->screen); - struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); + const struct util_format_description *desc = util_format_description(format); - pipe_reference(NULL, &texture->reference); + bool fake_rgtc = !panfrost_supports_compressed_format(device, MALI_BC4_UNORM); - struct panfrost_resource *prsrc = (struct panfrost_resource *) texture; - assert(prsrc->bo); + if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC && fake_rgtc) { + if (desc->is_snorm) + format = PIPE_FORMAT_R8G8B8A8_SNORM; + else + format = PIPE_FORMAT_R8G8B8A8_UNORM; + desc = util_format_description(format); + } - so->base = *template; - so->base.texture = texture; - so->base.reference.count = 1; - so->base.context = pctx; + so->texture_bo = prsrc->bo->gpu; + so->modifier = prsrc->modifier; unsigned char user_swizzle[4] = { - template->swizzle_r, - template->swizzle_g, - template->swizzle_b, - template->swizzle_a + so->base.swizzle_r, + so->base.swizzle_g, + so->base.swizzle_b, + so->base.swizzle_a }; /* In the hardware, array_size refers specifically to array textures, * whereas in Gallium, it also covers cubemaps */ unsigned array_size = texture->array_size; + unsigned depth = texture->depth0; - if (template->target == PIPE_TEXTURE_CUBE) { + if (so->base.target == PIPE_TEXTURE_CUBE) { /* TODO: Cubemap arrays */ assert(array_size == 6); array_size /= 6; } - enum mali_texture_type type = - panfrost_translate_texture_type(template->target); + /* MSAA only supported for 2D textures (and 2D texture arrays via an + * extension currently unimplemented */ + + if (so->base.target == PIPE_TEXTURE_2D) { + assert(depth == 1); + depth = texture->nr_samples; + } else { + /* MSAA only supported for 2D textures */ + assert(texture->nr_samples <= 1); + } + + enum mali_texture_dimension type = + panfrost_translate_texture_dimension(so->base.target); if (device->quirks & IS_BIFROST) { - const struct util_format_description *desc = - util_format_description(template->format); unsigned char composed_swizzle[4]; util_format_compose_swizzles(desc->swizzle, user_swizzle, composed_swizzle); unsigned size = panfrost_estimate_texture_payload_size( - template->u.tex.first_level, - template->u.tex.last_level, - 
template->u.tex.first_layer, - template->u.tex.last_layer, - type, prsrc->layout); + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); - so->bifrost_bo = pan_bo_create(device, size, 0); + so->bo = panfrost_bo_create(device, size, 0); - so->bifrost_descriptor = rzalloc(pctx, struct bifrost_texture_descriptor); panfrost_new_texture_bifrost( - so->bifrost_descriptor, + &so->bifrost_descriptor, texture->width0, texture->height0, - texture->depth0, array_size, - template->format, - type, prsrc->layout, - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, prsrc->cubemap_stride, panfrost_translate_swizzle_4(composed_swizzle), prsrc->bo->gpu, prsrc->slices, - so->bifrost_bo); + so->bo); } else { unsigned size = panfrost_estimate_texture_payload_size( - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, - type, prsrc->layout); - size += sizeof(struct mali_texture_descriptor); + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); + size += MALI_MIDGARD_TEXTURE_LENGTH; - so->midgard_bo = pan_bo_create(device, size, 0); + so->bo = panfrost_bo_create(device, size, 0); panfrost_new_texture( - so->midgard_bo->cpu, + so->bo->cpu, texture->width0, texture->height0, - texture->depth0, array_size, - template->format, - type, prsrc->layout, - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, prsrc->cubemap_stride, panfrost_translate_swizzle_4(user_swizzle), prsrc->bo->gpu, prsrc->slices); } +} + +static struct pipe_sampler_view * +panfrost_create_sampler_view( + struct pipe_context *pctx, + struct pipe_resource *texture, + const struct pipe_sampler_view *template) +{ + struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); + + pipe_reference(NULL, &texture->reference); + + so->base = *template; + so->base.texture = texture; + so->base.reference.count = 1; + so->base.context = pctx; + + panfrost_create_sampler_view_bo(so, pctx, texture); return (struct pipe_sampler_view *) so; } @@ -1038,10 +1064,7 @@ panfrost_sampler_view_destroy( struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview; pipe_resource_reference(&pview->texture, NULL); - panfrost_bo_unreference(view->midgard_bo); - panfrost_bo_unreference(view->bifrost_bo); - if (view->bifrost_descriptor) - ralloc_free(view->bifrost_descriptor); + panfrost_bo_unreference(view->bo); ralloc_free(view); } @@ -1059,49 +1082,76 @@ panfrost_set_shader_buffers( buffers, start, count); } -/* Hints that a framebuffer should use AFBC where possible */ - static void -panfrost_hint_afbc( - struct panfrost_device *device, - const struct pipe_framebuffer_state *fb) +panfrost_set_framebuffer_state(struct pipe_context *pctx, + const struct pipe_framebuffer_state *fb) { - /* AFBC implemenation incomplete; hide it */ - 
if (!(pan_debug & PAN_DBG_AFBC)) return; + struct panfrost_context *ctx = pan_context(pctx); - /* Hint AFBC to the resources bound to each color buffer */ + util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb); + ctx->batch = NULL; - for (unsigned i = 0; i < fb->nr_cbufs; ++i) { - struct pipe_surface *surf = fb->cbufs[i]; - struct panfrost_resource *rsrc = pan_resource(surf->texture); - panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1); - } + /* We may need to generate a new variant if the fragment shader is + * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */ + struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT]; - /* Also hint it to the depth buffer */ + if (fs && fs->variant_count && fs->variants[fs->active_variant].outputs_read) + ctx->base.bind_fs_state(&ctx->base, fs); +} - if (fb->zsbuf) { - struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture); - panfrost_resource_hint_layout(device, rsrc, MALI_TEXTURE_AFBC, 1); +static inline unsigned +pan_pipe_to_stencil_op(enum pipe_stencil_op in) +{ + switch (in) { + case PIPE_STENCIL_OP_KEEP: return MALI_STENCIL_OP_KEEP; + case PIPE_STENCIL_OP_ZERO: return MALI_STENCIL_OP_ZERO; + case PIPE_STENCIL_OP_REPLACE: return MALI_STENCIL_OP_REPLACE; + case PIPE_STENCIL_OP_INCR: return MALI_STENCIL_OP_INCR_SAT; + case PIPE_STENCIL_OP_DECR: return MALI_STENCIL_OP_DECR_SAT; + case PIPE_STENCIL_OP_INCR_WRAP: return MALI_STENCIL_OP_INCR_WRAP; + case PIPE_STENCIL_OP_DECR_WRAP: return MALI_STENCIL_OP_DECR_WRAP; + case PIPE_STENCIL_OP_INVERT: return MALI_STENCIL_OP_INVERT; + default: unreachable("Invalid stencil op"); } } -static void -panfrost_set_framebuffer_state(struct pipe_context *pctx, - const struct pipe_framebuffer_state *fb) +static inline void +pan_pipe_to_stencil(const struct pipe_stencil_state *in, void *out) { - struct panfrost_context *ctx = pan_context(pctx); - - panfrost_hint_afbc(pan_device(pctx->screen), fb); - util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb); - ctx->batch = NULL; - panfrost_invalidate_frame(ctx); + pan_pack(out, STENCIL, cfg) { + cfg.mask = in->valuemask; + cfg.compare_function = panfrost_translate_compare_func(in->func); + cfg.stencil_fail = pan_pipe_to_stencil_op(in->fail_op); + cfg.depth_fail = pan_pipe_to_stencil_op(in->zfail_op); + cfg.depth_pass = pan_pipe_to_stencil_op(in->zpass_op); + } } static void * panfrost_create_depth_stencil_state(struct pipe_context *pipe, - const struct pipe_depth_stencil_alpha_state *depth_stencil) + const struct pipe_depth_stencil_alpha_state *zsa) { - return mem_dup(depth_stencil, sizeof(*depth_stencil)); + struct panfrost_zsa_state *so = CALLOC_STRUCT(panfrost_zsa_state); + so->base = *zsa; + + pan_pipe_to_stencil(&zsa->stencil[0], &so->stencil_front); + so->stencil_mask_front = zsa->stencil[0].writemask; + + if (zsa->stencil[1].enabled) { + pan_pipe_to_stencil(&zsa->stencil[1], &so->stencil_back); + so->stencil_mask_back = zsa->stencil[1].writemask; + } else { + so->stencil_back = so->stencil_front; + so->stencil_mask_back = so->stencil_mask_front; + } + + /* Alpha lowered by frontend */ + assert(!zsa->alpha.enabled); + + /* TODO: Bounds test should be easy */ + assert(!zsa->depth.bounds_test); + + return so; } static void @@ -1109,22 +1159,8 @@ panfrost_bind_depth_stencil_state(struct pipe_context *pipe, void *cso) { struct panfrost_context *ctx = pan_context(pipe); - struct pipe_depth_stencil_alpha_state *depth_stencil = cso; - ctx->depth_stencil = depth_stencil; - - if (!depth_stencil) - return; - - /* Alpha 
does not exist in the hardware (it's not in ES3), so it's - * emulated in the fragment shader */ - - if (depth_stencil->alpha.enabled) { - /* We need to trigger a new shader (maybe) */ - ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]); - } - - /* Bounds test not implemented */ - assert(!depth_stencil->depth.bounds_test); + struct panfrost_zsa_state *zsa = cso; + ctx->depth_stencil = zsa; } static void @@ -1137,8 +1173,19 @@ static void panfrost_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask) { + struct panfrost_context *ctx = pan_context(pipe); + ctx->sample_mask = sample_mask; } +static void +panfrost_set_min_samples(struct pipe_context *pipe, + unsigned min_samples) +{ + struct panfrost_context *ctx = pan_context(pipe); + ctx->min_samples = min_samples; +} + + static void panfrost_set_clip_state(struct pipe_context *pipe, const struct pipe_clip_state *clip) @@ -1202,6 +1249,7 @@ panfrost_destroy(struct pipe_context *pipe) util_unreference_framebuffer_state(&panfrost->pipe_framebuffer); u_upload_destroy(pipe->stream_uploader); + u_upload_destroy(panfrost->state_uploader); ralloc_free(pipe); } @@ -1244,7 +1292,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: /* Allocate a bo for the query results to be stored */ if (!query->bo) { - query->bo = pan_bo_create( + query->bo = panfrost_bo_create( pan_device(ctx->base.screen), sizeof(unsigned), 0); } @@ -1265,7 +1313,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) break; default: - DBG("Skipping query %u\n", query->type); + /* TODO: timestamp queries, etc? */ break; } @@ -1309,8 +1357,8 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_OCCLUSION_COUNTER: case PIPE_QUERY_OCCLUSION_PREDICATE: case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: - /* Flush first */ - panfrost_flush_all_batches(ctx, true); + panfrost_flush_batches_accessing_bo(ctx, query->bo, false); + panfrost_bo_wait(query->bo, INT64_MAX, false); /* Read back the query results */ unsigned *result = (unsigned *) query->bo->cpu; @@ -1326,12 +1374,12 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_PRIMITIVES_GENERATED: case PIPE_QUERY_PRIMITIVES_EMITTED: - panfrost_flush_all_batches(ctx, true); + panfrost_flush_all_batches(ctx, 0); vresult->u64 = query->end - query->start; break; default: - DBG("Skipped query get %u\n", query->type); + /* TODO: more queries */ break; } @@ -1398,6 +1446,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) { struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context); struct pipe_context *gallium = (struct pipe_context *) ctx; + struct panfrost_device *dev = pan_device(screen); gallium->screen = screen; @@ -1408,6 +1457,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->flush = panfrost_flush; gallium->clear = panfrost_clear; gallium->draw_vbo = panfrost_draw_vbo; + gallium->texture_barrier = panfrost_texture_barrier; gallium->set_vertex_buffers = panfrost_set_vertex_buffers; gallium->set_constant_buffer = panfrost_set_constant_buffer; @@ -1444,6 +1494,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state; gallium->set_sample_mask = panfrost_set_sample_mask; + gallium->set_min_samples = panfrost_set_min_samples; gallium->set_clip_state = panfrost_set_clip_state; 
gallium->set_viewport_states = panfrost_set_viewport_states; @@ -1465,13 +1516,21 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) panfrost_blend_context_init(gallium); panfrost_compute_context_init(gallium); - /* XXX: leaks */ gallium->stream_uploader = u_upload_create_default(gallium); gallium->const_uploader = gallium->stream_uploader; - assert(gallium->stream_uploader); - /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */ - ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1; + ctx->state_uploader = u_upload_create(gallium, 4096, + PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_DYNAMIC, 0); + + /* All of our GPUs support ES mode. Midgard supports additionally + * QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */ + + ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1; + + if (!(dev->quirks & IS_BIFROST)) { + ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP); + ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON); + } ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes); @@ -1484,7 +1543,15 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) /* Prepare for render! */ panfrost_batch_init(ctx); - panfrost_invalidate_frame(ctx); + + if (!(dev->quirks & IS_BIFROST)) { + for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c) + ctx->blit_blend.rt[c].shaders = _mesa_hash_table_u64_create(ctx); + } + + /* By default mask everything on */ + ctx->sample_mask = ~0; + ctx->active_queries = true; return gallium; }
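A note on the padded vertex count path added in panfrost_draw_vbo above: when
instancing, the padded count is split into a power-of-two shift
(instance_shift) and the remaining odd factor, stored as (odd - 1) / 2
(instance_odd). The following standalone sketch -- helper names are
illustrative, not from this patch -- shows the decomposition and the round
trip back to the padded count:

    #include <assert.h>

    /* Split a non-zero padded count, padded = odd * 2^shift, into the pair
     * programmed into the vertex/tiler postfix: shift = ctz(padded) and
     * k = (odd - 1) / 2. E.g. padded = 96 = 3 << 5 gives shift = 5, k = 1. */
    static inline void
    pan_split_padded_count(unsigned padded, unsigned *shift, unsigned *k)
    {
            *shift = __builtin_ctz(padded);
            *k = padded >> (*shift + 1);
    }

    /* Reconstruct the padded count as (2k + 1) << shift. */
    static inline unsigned
    pan_join_padded_count(unsigned shift, unsigned k)
    {
            return ((k << 1) | 1) << shift;
    }

    int main(void)
    {
            unsigned shift, k;

            pan_split_padded_count(96, &shift, &k);
            assert(shift == 5 && k == 1);
            assert(pan_join_padded_count(shift, k) == 96);
            return 0;
    }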