X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fpan_context.c;h=58f08fae0eaf18821ad55e360207cf24628cd5c9;hb=e5c77cbead98cfed0cd84723c7cac8796a2bfa66;hp=0f2d8b5a72870ca994f7f2f16a8a649dd867d137;hpb=8ac17139b1c9b5f8b017c389a29fd373ecfd9e55;p=mesa.git diff --git a/src/gallium/drivers/panfrost/pan_context.c b/src/gallium/drivers/panfrost/pan_context.c index 0f2d8b5a728..58f08fae0ea 100644 --- a/src/gallium/drivers/panfrost/pan_context.c +++ b/src/gallium/drivers/panfrost/pan_context.c @@ -53,13 +53,14 @@ #include "pan_blend_shaders.h" #include "pan_cmdstream.h" #include "pan_util.h" -#include "pandecode/decode.h" +#include "decode.h" +#include "util/pan_lower_framebuffer.h" struct midgard_tiler_descriptor panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) { - struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen); - bool hierarchy = !(screen->quirks & MIDGARD_NO_HIER_TILING); + struct panfrost_device *device = pan_device(batch->ctx->base.screen); + bool hierarchy = !(device->quirks & MIDGARD_NO_HIER_TILING); struct midgard_tiler_descriptor t = {0}; unsigned height = batch->key.height; unsigned width = batch->key.width; @@ -75,20 +76,14 @@ panfrost_emit_midg_tiler(struct panfrost_batch *batch, unsigned vertex_count) t.polygon_list_size = panfrost_tiler_full_size( width, height, t.hierarchy_mask, hierarchy); - /* Sanity check */ - if (vertex_count) { - struct panfrost_bo *tiler_heap; - - tiler_heap = panfrost_batch_get_tiler_heap(batch); t.polygon_list = panfrost_batch_get_polygon_list(batch, header_size + t.polygon_list_size); - /* Allow the entire tiler heap */ - t.heap_start = tiler_heap->gpu; - t.heap_end = tiler_heap->gpu + tiler_heap->size; + t.heap_start = device->tiler_heap->gpu; + t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size; } else { struct panfrost_bo *tiler_dummy; @@ -125,6 +120,7 @@ static void panfrost_clear( struct pipe_context *pipe, unsigned buffers, + const struct pipe_scissor_state *scissor_state, const union pipe_color_union *color, double depth, unsigned stencil) { @@ -137,643 +133,16 @@ panfrost_clear( * fragment jobs. */ struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx); - - panfrost_batch_add_fbo_bos(batch); panfrost_batch_clear(batch, buffers, color, depth, stencil); } -/* Reset per-frame context, called on context initialisation as well as after - * flushing a frame */ - -void -panfrost_invalidate_frame(struct panfrost_context *ctx) -{ - for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) - ctx->payloads[i].postfix.shared_memory = 0; - - /* TODO: When does this need to be handled? 
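-         * (Our reading, not confirmed: it matters whenever a query must
-         * survive a flush, since invalidate_frame runs after every frame
-         * flush and unconditionally re-enables queries here.)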
*/ - ctx->active_queries = true; -} - -/* In practice, every field of these payloads should be configurable - * arbitrarily, which means these functions are basically catch-all's for - * as-of-yet unwavering unknowns */ - -static void -panfrost_emit_vertex_payload(struct panfrost_context *ctx) -{ - /* 0x2 bit clear on 32-bit T6XX */ - - struct midgard_payload_vertex_tiler payload = { - .gl_enables = 0x4 | 0x2, - }; - - /* Vertex and compute are closely coupled, so share a payload */ - - memcpy(&ctx->payloads[PIPE_SHADER_VERTEX], &payload, sizeof(payload)); - memcpy(&ctx->payloads[PIPE_SHADER_COMPUTE], &payload, sizeof(payload)); -} - -static unsigned -translate_tex_wrap(enum pipe_tex_wrap w) -{ - switch (w) { - case PIPE_TEX_WRAP_REPEAT: - return MALI_WRAP_REPEAT; - - case PIPE_TEX_WRAP_CLAMP: - return MALI_WRAP_CLAMP; - - case PIPE_TEX_WRAP_CLAMP_TO_EDGE: - return MALI_WRAP_CLAMP_TO_EDGE; - - case PIPE_TEX_WRAP_CLAMP_TO_BORDER: - return MALI_WRAP_CLAMP_TO_BORDER; - - case PIPE_TEX_WRAP_MIRROR_REPEAT: - return MALI_WRAP_MIRRORED_REPEAT; - - case PIPE_TEX_WRAP_MIRROR_CLAMP: - return MALI_WRAP_MIRRORED_CLAMP; - - case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: - return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE; - - case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: - return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER; - - default: - unreachable("Invalid wrap"); - } -} - -static unsigned -panfrost_translate_compare_func(enum pipe_compare_func in) -{ - switch (in) { - case PIPE_FUNC_NEVER: - return MALI_FUNC_NEVER; - - case PIPE_FUNC_LESS: - return MALI_FUNC_LESS; - - case PIPE_FUNC_EQUAL: - return MALI_FUNC_EQUAL; - - case PIPE_FUNC_LEQUAL: - return MALI_FUNC_LEQUAL; - - case PIPE_FUNC_GREATER: - return MALI_FUNC_GREATER; - - case PIPE_FUNC_NOTEQUAL: - return MALI_FUNC_NOTEQUAL; - - case PIPE_FUNC_GEQUAL: - return MALI_FUNC_GEQUAL; - - case PIPE_FUNC_ALWAYS: - return MALI_FUNC_ALWAYS; - - default: - unreachable("Invalid func"); - } -} - -static unsigned -panfrost_translate_stencil_op(enum pipe_stencil_op in) -{ - switch (in) { - case PIPE_STENCIL_OP_KEEP: - return MALI_STENCIL_KEEP; - - case PIPE_STENCIL_OP_ZERO: - return MALI_STENCIL_ZERO; - - case PIPE_STENCIL_OP_REPLACE: - return MALI_STENCIL_REPLACE; - - case PIPE_STENCIL_OP_INCR: - return MALI_STENCIL_INCR; - - case PIPE_STENCIL_OP_DECR: - return MALI_STENCIL_DECR; - - case PIPE_STENCIL_OP_INCR_WRAP: - return MALI_STENCIL_INCR_WRAP; - - case PIPE_STENCIL_OP_DECR_WRAP: - return MALI_STENCIL_DECR_WRAP; - - case PIPE_STENCIL_OP_INVERT: - return MALI_STENCIL_INVERT; - - default: - unreachable("Invalid stencil op"); - } -} - -static void -panfrost_make_stencil_state(const struct pipe_stencil_state *in, struct mali_stencil_test *out) -{ - out->ref = 0; /* Gallium gets it from elsewhere */ - - out->mask = in->valuemask; - out->func = panfrost_translate_compare_func(in->func); - out->sfail = panfrost_translate_stencil_op(in->fail_op); - out->dpfail = panfrost_translate_stencil_op(in->zfail_op); - out->dppass = panfrost_translate_stencil_op(in->zpass_op); -} - -static void -panfrost_default_shader_backend(struct panfrost_context *ctx) -{ - struct panfrost_screen *screen = pan_screen(ctx->base.screen); - struct mali_shader_meta shader = { - .alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000), - - .unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010, - .unknown2_4 = MALI_NO_MSAA | 0x4e0, - }; - - /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this is - * required (independent of 32-bit/64-bit descriptors), or why it's not - * used on later GPU revisions. 
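-         * (As far as we know, MIDGARD_SFBD marks the older single
-         * framebuffer descriptor GPUs -- the T600/T720 class -- which is
-         * why the check below keys on that quirk rather than on a GPU ID.)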
Otherwise, all shader jobs fault on - * these earlier chips (perhaps this is a chicken bit of some kind). - * More investigation is needed. */ - - if (screen->quirks & MIDGARD_SFBD) - shader.unknown2_4 |= 0x10; - - struct pipe_stencil_state default_stencil = { - .enabled = 0, - .func = PIPE_FUNC_ALWAYS, - .fail_op = MALI_STENCIL_KEEP, - .zfail_op = MALI_STENCIL_KEEP, - .zpass_op = MALI_STENCIL_KEEP, - .writemask = 0xFF, - .valuemask = 0xFF - }; - - panfrost_make_stencil_state(&default_stencil, &shader.stencil_front); - shader.stencil_mask_front = default_stencil.writemask; - - panfrost_make_stencil_state(&default_stencil, &shader.stencil_back); - shader.stencil_mask_back = default_stencil.writemask; - - if (default_stencil.enabled) - shader.unknown2_4 |= MALI_STENCIL_TEST; - - memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader)); -} - bool panfrost_writes_point_size(struct panfrost_context *ctx) { assert(ctx->shader[PIPE_SHADER_VERTEX]); struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX); - return vs->writes_point_size && ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS; -} - -/* Stage the attribute descriptors so we can adjust src_offset - * to let BOs align nicely */ - -static void -panfrost_stage_attributes(struct panfrost_context *ctx) -{ - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - struct panfrost_vertex_state *so = ctx->vertex; - - size_t sz = sizeof(struct mali_attr_meta) * PAN_MAX_ATTRIBUTE; - struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz); - struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu; - - /* Copy as-is for the first pass */ - memcpy(target, so->hw, sz); - - /* Fixup offsets for the second pass. Recall that the hardware - * calculates attribute addresses as: - * - * addr = base + (stride * vtx) + src_offset; - * - * However, on Mali, base must be aligned to 64-bytes, so we - * instead let: - * - * base' = base & ~63 = base - (base & 63) - * - * To compensate when using base' (see emit_vertex_data), we have - * to adjust src_offset by the masked off piece: - * - * addr' = base' + (stride * vtx) + (src_offset + (base & 63)) - * = base - (base & 63) + (stride * vtx) + src_offset + (base & 63) - * = base + (stride * vtx) + src_offset - * = addr; - * - * QED. - */ - - unsigned start = ctx->payloads[PIPE_SHADER_VERTEX].offset_start; - - for (unsigned i = 0; i < so->num_elements; ++i) { - unsigned vbi = so->pipe[i].vertex_buffer_index; - struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi]; - struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource); - mali_ptr addr = rsrc->bo->gpu + buf->buffer_offset; - - /* Adjust by the masked off bits of the offset. 
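-                 * (Worked example, illustrative values only: for base = 0x10025
-                 * and stride 16, base' = base & ~63 = 0x10000 and src_offset
-                 * grows by base & 63 = 0x25, so base' + src_offset' resolves to
-                 * the same address as the original unaligned base + src_offset.)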
Make sure we - * read src_offset from so->hw (which is not GPU visible) - * rather than target (which is) due to caching effects */ - - unsigned src_offset = so->hw[i].src_offset; - src_offset += (addr & 63); - - /* Also, somewhat obscurely per-instance data needs to be - * offset in response to a delayed start in an indexed draw */ - - if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) - src_offset -= buf->stride * start; - - target[i].src_offset = src_offset; - } - - /* Let's also include vertex builtins */ - - struct mali_attr_meta builtin = { - .format = MALI_R32UI, - .swizzle = panfrost_get_default_swizzle(1) - }; - - /* See mali_attr_meta specification for the magic number */ - - builtin.index = so->vertexid_index; - memcpy(&target[PAN_VERTEX_ID], &builtin, 4); - - builtin.index = so->vertexid_index + 1; - memcpy(&target[PAN_INSTANCE_ID], &builtin, 4); - - ctx->payloads[PIPE_SHADER_VERTEX].postfix.attribute_meta = transfer.gpu; -} - -static void -panfrost_upload_sampler_descriptors(struct panfrost_context *ctx) -{ - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - size_t desc_size = sizeof(struct mali_sampler_descriptor); - - for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) { - mali_ptr upload = 0; - - if (ctx->sampler_count[t]) { - size_t transfer_size = desc_size * ctx->sampler_count[t]; - - struct panfrost_transfer transfer = - panfrost_allocate_transient(batch, transfer_size); - - struct mali_sampler_descriptor *desc = - (struct mali_sampler_descriptor *) transfer.cpu; - - for (int i = 0; i < ctx->sampler_count[t]; ++i) - desc[i] = ctx->samplers[t][i]->hw; - - upload = transfer.gpu; - } - - ctx->payloads[t].postfix.sampler_descriptor = upload; - } -} - -static mali_ptr -panfrost_upload_tex( - struct panfrost_context *ctx, - enum pipe_shader_type st, - struct panfrost_sampler_view *view) -{ - if (!view) - return (mali_ptr) 0; - - struct pipe_sampler_view *pview = &view->base; - struct panfrost_resource *rsrc = pan_resource(pview->texture); - - /* Add the BO to the job so it's retained until the job is done. */ - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - - panfrost_batch_add_bo(batch, rsrc->bo, - PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ | - panfrost_bo_access_for_stage(st)); - - panfrost_batch_add_bo(batch, view->bo, - PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ | - panfrost_bo_access_for_stage(st)); - - return view->bo->gpu; -} - -static void -panfrost_upload_texture_descriptors(struct panfrost_context *ctx) -{ - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - - for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) { - mali_ptr trampoline = 0; - - if (ctx->sampler_view_count[t]) { - uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS]; - - for (int i = 0; i < ctx->sampler_view_count[t]; ++i) - trampolines[i] = - panfrost_upload_tex(ctx, t, ctx->sampler_views[t][i]); - - trampoline = panfrost_upload_transient(batch, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]); - } - - ctx->payloads[t].postfix.texture_trampoline = trampoline; - } -} - -/* Compute number of UBOs active (more specifically, compute the highest UBO - * number addressable -- if there are gaps, include them in the count anyway). - * We always include UBO #0 in the count, since we *need* uniforms enabled for - * sysvals. 
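- * (Example: enabled_mask = 0b1000 gives mask | 1 = 0b1001, so
- * 32 - __builtin_clz(0b1001) = 4: UBOs 0..3 are counted, the gap at
- * 1..2 included.)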
*/ - -unsigned -panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage) -{ - unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1; - return 32 - __builtin_clz(mask); -} - -/* Fixes up a shader state with current state */ - -void -panfrost_patch_shader_state(struct panfrost_context *ctx, - enum pipe_shader_type stage) -{ - struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, stage); - - if (!ss) - return; - - ss->tripipe->texture_count = ctx->sampler_view_count[stage]; - ss->tripipe->sampler_count = ctx->sampler_count[stage]; - - ss->tripipe->midgard1.flags_lo = 0x220; - - unsigned ubo_count = panfrost_ubo_count(ctx, stage); - ss->tripipe->midgard1.uniform_buffer_count = ubo_count; -} - -/* Go through dirty flags and actualise them in the cmdstream. */ - -void -panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) -{ - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - struct panfrost_screen *screen = pan_screen(ctx->base.screen); - - panfrost_batch_add_fbo_bos(batch); - - for (int i = 0; i < PIPE_SHADER_TYPES; ++i) - panfrost_vt_attach_framebuffer(ctx, &ctx->payloads[i]); - - if (with_vertex_data) { - panfrost_emit_vertex_data(batch); - - /* Varyings emitted for -all- geometry */ - unsigned total_count = ctx->padded_count * ctx->instance_count; - panfrost_emit_varying_descriptor(ctx, total_count); - } - - - if (ctx->rasterizer) { - bool msaa = ctx->rasterizer->base.multisample; - ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables = ctx->rasterizer->tiler_gl_enables; - - /* TODO: Sample size */ - SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_MSAA, msaa); - SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa); - } - - panfrost_batch_set_requirements(batch); - - if (ctx->occlusion_query) { - ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY; - ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.occlusion_counter = ctx->occlusion_query->bo->gpu; - } - - panfrost_patch_shader_state(ctx, PIPE_SHADER_VERTEX); - panfrost_emit_shader_meta(batch, PIPE_SHADER_VERTEX, - &ctx->payloads[PIPE_SHADER_VERTEX]); - panfrost_patch_shader_state(ctx, PIPE_SHADER_COMPUTE); - panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, - &ctx->payloads[PIPE_SHADER_COMPUTE]); - - if (ctx->shader[PIPE_SHADER_VERTEX] && ctx->shader[PIPE_SHADER_FRAGMENT]) { - /* Check if we need to link the gl_PointSize varying */ - if (!panfrost_writes_point_size(ctx)) { - /* If the size is constant, write it out. Otherwise, - * don't touch primitive_size (since we would clobber - * the pointer there) */ - - bool points = ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode == MALI_POINTS; - - ctx->payloads[PIPE_SHADER_FRAGMENT].primitive_size.constant = points ? 
- ctx->rasterizer->base.point_size : - ctx->rasterizer->base.line_width; - } - } - - if (ctx->shader[PIPE_SHADER_FRAGMENT]) { - struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT); - - panfrost_patch_shader_state(ctx, PIPE_SHADER_FRAGMENT); - -#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name - - COPY(shader); - COPY(attribute_count); - COPY(varying_count); - COPY(texture_count); - COPY(sampler_count); - COPY(midgard1.uniform_count); - COPY(midgard1.uniform_buffer_count); - COPY(midgard1.work_count); - COPY(midgard1.flags_lo); - COPY(midgard1.flags_hi); - -#undef COPY - - /* Get blending setup */ - unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1); - - struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS]; - unsigned shader_offset = 0; - struct panfrost_bo *shader_bo = NULL; - - for (unsigned c = 0; c < rt_count; ++c) { - blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo, &shader_offset); - } - - /* If there is a blend shader, work registers are shared. XXX: opt */ - - for (unsigned c = 0; c < rt_count; ++c) { - if (blend[c].is_shader) - ctx->fragment_shader_core.midgard1.work_count = 16; - } - - /* Depending on whether it's legal to in the given shader, we - * try to enable early-z testing (or forward-pixel kill?) */ - - SET_BIT(ctx->fragment_shader_core.midgard1.flags_lo, MALI_EARLY_Z, - !variant->can_discard && !variant->writes_depth); - - /* Add the writes Z/S flags if needed. */ - SET_BIT(ctx->fragment_shader_core.midgard1.flags_lo, - MALI_WRITES_Z, variant->writes_depth); - SET_BIT(ctx->fragment_shader_core.midgard1.flags_hi, - MALI_WRITES_S, variant->writes_stencil); - - /* Any time texturing is used, derivatives are implicitly - * calculated, so we need to enable helper invocations */ - - SET_BIT(ctx->fragment_shader_core.midgard1.flags_lo, MALI_HELPER_INVOCATIONS, variant->helper_invocations); - - /* Assign the stencil refs late */ - - unsigned front_ref = ctx->stencil_ref.ref_value[0]; - unsigned back_ref = ctx->stencil_ref.ref_value[1]; - bool back_enab = ctx->depth_stencil->stencil[1].enabled; - - ctx->fragment_shader_core.stencil_front.ref = front_ref; - ctx->fragment_shader_core.stencil_back.ref = back_enab ? back_ref : front_ref; - - /* CAN_DISCARD should be set if the fragment shader possibly - * contains a 'discard' instruction. It is likely this is - * related to optimizations related to forward-pixel kill, as - * per "Mali Performance 3: Is EGL_BUFFER_PRESERVED a good - * thing?" by Peter Harris - */ - - SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, variant->can_discard); - SET_BIT(ctx->fragment_shader_core.midgard1.flags_lo, 0x400, variant->can_discard); - - /* Even on MFBD, the shader descriptor gets blend shaders. It's - * *also* copied to the blend_meta appended (by convention), - * but this is the field actually read by the hardware. (Or - * maybe both are read...?). Specify the last RTi with a blend - * shader. */ - - ctx->fragment_shader_core.blend.shader = 0; - - for (signed rt = (rt_count - 1); rt >= 0; --rt) { - if (blend[rt].is_shader) { - ctx->fragment_shader_core.blend.shader = - blend[rt].shader.gpu | blend[rt].shader.first_tag; - break; - } - } - - if (screen->quirks & MIDGARD_SFBD) { - /* When only a single render target platform is used, the blend - * information is inside the shader meta itself. 
We - * additionally need to signal CAN_DISCARD for nontrivial blend - * modes (so we're able to read back the destination buffer) */ - - SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_HAS_BLEND_SHADER, blend[0].is_shader); - - if (!blend[0].is_shader) { - ctx->fragment_shader_core.blend.equation = - *blend[0].equation.equation; - ctx->fragment_shader_core.blend.constant = - blend[0].equation.constant; - } - - SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_CAN_DISCARD, !blend[0].no_blending); - } - - size_t size = sizeof(struct mali_shader_meta) + (sizeof(struct midgard_blend_rt) * rt_count); - struct panfrost_transfer transfer = panfrost_allocate_transient(batch, size); - memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta)); - - ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.shader = transfer.gpu; - - if (!(screen->quirks & MIDGARD_SFBD)) { - /* Additional blend descriptor tacked on for jobs using MFBD */ - - struct midgard_blend_rt rts[4]; - - for (unsigned i = 0; i < rt_count; ++i) { - rts[i].flags = 0x200; - - bool is_srgb = - (ctx->pipe_framebuffer.nr_cbufs > i) && - (ctx->pipe_framebuffer.cbufs[i]) && - util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format); - - SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader); - SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending); - SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb); - SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither); - - if (blend[i].is_shader) { - rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag; - } else { - rts[i].blend.equation = *blend[i].equation.equation; - rts[i].blend.constant = blend[i].equation.constant; - } - } - - memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * rt_count); - } - } - - /* We stage to transient, so always dirty.. */ - if (ctx->vertex) - panfrost_stage_attributes(ctx); - - panfrost_upload_sampler_descriptors(ctx); - panfrost_upload_texture_descriptors(ctx); - - for (int i = 0; i < PIPE_SHADER_TYPES; ++i) - panfrost_emit_const_buf(batch, i, &ctx->payloads[i]); - - /* TODO: Upload the viewport somewhere more appropriate */ - - panfrost_emit_viewport(batch, &ctx->payloads[PIPE_SHADER_FRAGMENT]); -} - -/* Corresponds to exactly one draw, but does not submit anything */ - -static void -panfrost_queue_draw(struct panfrost_context *ctx) -{ - /* Handle dirty flags now */ - panfrost_emit_for_draw(ctx, true); - - /* If rasterizer discard is enable, only submit the vertex */ - - bool rasterizer_discard = ctx->rasterizer - && ctx->rasterizer->base.rasterizer_discard; - - - struct midgard_payload_vertex_tiler *vertex_payload = &ctx->payloads[PIPE_SHADER_VERTEX]; - struct midgard_payload_vertex_tiler *tiler_payload = &ctx->payloads[PIPE_SHADER_FRAGMENT]; - - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep; - - if (wallpapering) { - /* Inject in reverse order, with "predicted" job indices. 
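-                 * As we read it, the tiler job is queued first with a
-                 * dependency on index batch->job_index + 2, the slot the
-                 * vertex job is predicted to land in once it is injected
-                 * after it.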
THIS IS A HACK XXX */ - panfrost_new_job(batch, JOB_TYPE_TILER, false, batch->job_index + 2, tiler_payload, sizeof(*tiler_payload), true); - panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), true); - } else { - unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0, vertex_payload, sizeof(*vertex_payload), false); - - if (!rasterizer_discard) - panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tiler_payload, sizeof(*tiler_payload), false); - } - - panfrost_batch_adjust_stack_size(batch); + return vs->writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS; } /* The entire frame is in memory -- send it off to the kernel! */ @@ -785,45 +154,36 @@ panfrost_flush( unsigned flags) { struct panfrost_context *ctx = pan_context(pipe); - struct util_dynarray fences; + struct panfrost_device *dev = pan_device(pipe->screen); + uint32_t syncobj = 0; - /* We must collect the fences before the flush is done, otherwise we'll - * lose track of them. - */ - if (fence) { - util_dynarray_init(&fences, NULL); - hash_table_foreach(ctx->batches, hentry) { - struct panfrost_batch *batch = hentry->data; - - panfrost_batch_fence_reference(batch->out_sync); - util_dynarray_append(&fences, - struct panfrost_batch_fence *, - batch->out_sync); - } - } + if (fence) + drmSyncobjCreate(dev->fd, 0, &syncobj); /* Submit all pending jobs */ - panfrost_flush_all_batches(ctx, false); + panfrost_flush_all_batches(ctx, syncobj); if (fence) { - struct panfrost_fence *f = panfrost_fence_create(ctx, &fences); + struct panfrost_fence *f = panfrost_fence_create(ctx, syncobj); pipe->screen->fence_reference(pipe->screen, fence, NULL); *fence = (struct pipe_fence_handle *)f; - - util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence) - panfrost_batch_fence_unreference(*fence); - - util_dynarray_fini(&fences); } - if (pan_debug & PAN_DBG_TRACE) + if (dev->debug & PAN_DBG_TRACE) pandecode_next_frame(); } -#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_##c; +static void +panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags) +{ + struct panfrost_context *ctx = pan_context(pipe); + panfrost_flush_all_batches(ctx, 0); +} + +#define DEFINE_CASE(c) case PIPE_PRIM_##c: return MALI_DRAW_MODE_##c; static int -g2m_draw_mode(enum pipe_prim_type mode) +pan_draw_mode(enum pipe_prim_type mode) { switch (mode) { DEFINE_CASE(POINTS); @@ -844,78 +204,6 @@ g2m_draw_mode(enum pipe_prim_type mode) #undef DEFINE_CASE -static unsigned -panfrost_translate_index_size(unsigned size) -{ - switch (size) { - case 1: - return MALI_DRAW_INDEXED_UINT8; - - case 2: - return MALI_DRAW_INDEXED_UINT16; - - case 4: - return MALI_DRAW_INDEXED_UINT32; - - default: - unreachable("Invalid index size"); - } -} - -/* Gets a GPU address for the associated index buffer. Only gauranteed to be - * good for the duration of the draw (transient), could last longer. Also get - * the bounds on the index buffer for the range accessed by the draw. We do - * these operations together because there are natural optimizations which - * require them to be together. 
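- * (Illustrative: a draw whose index buffer holds {3, 4, 7} yields
- * min_index = 3 and max_index = 7, so only vertices 3..7 need
- * shading, and for resource-backed index buffers the bounds are
- * memoised in the minmax cache.)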
*/ - -static mali_ptr -panfrost_get_index_buffer_bounded(struct panfrost_context *ctx, const struct pipe_draw_info *info, unsigned *min_index, unsigned *max_index) -{ - struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource); - - off_t offset = info->start * info->index_size; - struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); - mali_ptr out = 0; - - bool needs_indices = true; - - if (info->max_index != ~0u) { - *min_index = info->min_index; - *max_index = info->max_index; - needs_indices = false; - } - - if (!info->has_user_indices) { - /* Only resources can be directly mapped */ - panfrost_batch_add_bo(batch, rsrc->bo, - PAN_BO_ACCESS_SHARED | - PAN_BO_ACCESS_READ | - PAN_BO_ACCESS_VERTEX_TILER); - out = rsrc->bo->gpu + offset; - - /* Check the cache */ - needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache, info->start, info->count, - min_index, max_index); - } else { - /* Otherwise, we need to upload to transient memory */ - const uint8_t *ibuf8 = (const uint8_t *) info->index.user; - out = panfrost_upload_transient(batch, ibuf8 + offset, info->count * info->index_size); - } - - if (needs_indices) { - /* Fallback */ - u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index); - - if (!info->has_user_indices) { - panfrost_minmax_cache_add(rsrc->index_cache, info->start, info->count, - *min_index, *max_index); - } - } - - - return out; -} - static bool panfrost_scissor_culls_everything(struct panfrost_context *ctx) { @@ -923,7 +211,7 @@ panfrost_scissor_culls_everything(struct panfrost_context *ctx) /* Check if we're scissoring at all */ - if (!(ctx->rasterizer && ctx->rasterizer->base.scissor)) + if (!ctx->rasterizer->base.scissor) return false; return (ss->minx == ss->maxx) || (ss->miny == ss->maxy); @@ -949,12 +237,25 @@ panfrost_statistics_record( ctx->tf_prims_generated += prims; } +static void +panfrost_update_streamout_offsets(struct panfrost_context *ctx) +{ + for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) { + unsigned count; + + count = u_stream_outputs_for_vertices(ctx->active_prim, + ctx->vertex_count); + ctx->streamout.offsets[i] += count; + } +} + static void panfrost_draw_vbo( struct pipe_context *pipe, const struct pipe_draw_info *info) { struct panfrost_context *ctx = pan_context(pipe); + struct panfrost_device *device = pan_device(ctx->base.screen); /* First of all, check the scissor to see if anything is drawn at all. * If it's not, we drop the draw (mostly a conformance issue; @@ -979,92 +280,94 @@ panfrost_draw_vbo( assert(ctx->rasterizer != NULL); if (!(ctx->draw_modes & (1 << mode))) { - if (mode == PIPE_PRIM_QUADS && info->count == 4 && !ctx->rasterizer->base.flatshade) { - mode = PIPE_PRIM_TRIANGLE_FAN; - } else { - if (info->count < 4) { - /* Degenerate case? */ - return; - } - - util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); - util_primconvert_draw_vbo(ctx->primconvert, info); + if (info->count < 4) { + /* Degenerate case? */ return; } - } - ctx->payloads[PIPE_SHADER_VERTEX].offset_start = info->start; - ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = info->start; - - /* Now that we have a guaranteed terminating path, find the job. 
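-         * (The call below was kept purely for its side effect of creating
-         * the batch; the returned pointer was deliberately discarded.)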
- * Assignment commented out to prevent unused warning */ + util_primconvert_save_rasterizer_state(ctx->primconvert, &ctx->rasterizer->base); + util_primconvert_draw_vbo(ctx->primconvert, info); + return; + } - /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx); + /* Now that we have a guaranteed terminating path, find the job. */ - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode); + struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx); + panfrost_batch_set_requirements(batch); /* Take into account a negative bias */ ctx->vertex_count = info->count + abs(info->index_bias); ctx->instance_count = info->instance_count; ctx->active_prim = info->mode; - /* For non-indexed draws, they're the same */ - unsigned vertex_count = ctx->vertex_count; + struct mali_vertex_tiler_prefix vertex_prefix = { 0 }, tiler_prefix = { 0 }; + struct mali_vertex_tiler_postfix vertex_postfix = { 0 }, tiler_postfix = { 0 }; + union midgard_primitive_size primitive_size; + unsigned vertex_count; - unsigned draw_flags = 0; + if (device->quirks & IS_BIFROST) { + vertex_postfix.gl_enables = 0x2; + tiler_postfix.gl_enables = 0x3; + vertex_postfix.shared_memory = panfrost_vt_emit_shared_memory(batch); + } else { + vertex_postfix.gl_enables = 0x6; + tiler_postfix.gl_enables = 0x7; + vertex_postfix.shared_memory = panfrost_batch_reserve_framebuffer(batch); + } - /* The draw flags interpret how primitive size is interpreted */ + tiler_postfix.shared_memory = vertex_postfix.shared_memory; - if (panfrost_writes_point_size(ctx)) - draw_flags |= MALI_DRAW_VARYING_SIZE; + if (ctx->occlusion_query) { + tiler_postfix.gl_enables |= MALI_OCCLUSION_QUERY; + tiler_postfix.occlusion_counter = ctx->occlusion_query->bo->gpu; + panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo, + PAN_BO_ACCESS_SHARED | + PAN_BO_ACCESS_RW | + PAN_BO_ACCESS_FRAGMENT); + } - if (info->primitive_restart) - draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX; + struct pipe_rasterizer_state *rast = &ctx->rasterizer->base; + SET_BIT(tiler_postfix.gl_enables, MALI_FRONT_CCW_TOP, + rast->front_ccw); + SET_BIT(tiler_postfix.gl_enables, MALI_CULL_FACE_FRONT, + (rast->cull_face & PIPE_FACE_FRONT)); + SET_BIT(tiler_postfix.gl_enables, MALI_CULL_FACE_BACK, + (rast->cull_face & PIPE_FACE_BACK)); + SET_BIT(tiler_prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST, + rast->flatshade_first); - /* These doesn't make much sense */ + tiler_prefix.draw_mode = pan_draw_mode(mode); - draw_flags |= 0x3000; + unsigned draw_flags = 0x3000; - if (ctx->rasterizer && ctx->rasterizer->base.flatshade_first) - draw_flags |= MALI_DRAW_FLATSHADE_FIRST; + if (panfrost_writes_point_size(ctx)) + draw_flags |= MALI_DRAW_VARYING_SIZE; - panfrost_statistics_record(ctx, info); + if (info->primitive_restart) + draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX; if (info->index_size) { unsigned min_index = 0, max_index = 0; - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = - panfrost_get_index_buffer_bounded(ctx, info, &min_index, &max_index); + + tiler_prefix.indices = panfrost_get_index_buffer_bounded(ctx, + info, + &min_index, + &max_index); /* Use the corresponding values */ vertex_count = max_index - min_index + 1; - ctx->payloads[PIPE_SHADER_VERTEX].offset_start = min_index + info->index_bias; - ctx->payloads[PIPE_SHADER_FRAGMENT].offset_start = min_index + info->index_bias; - - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = -min_index; - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = 
MALI_POSITIVE(info->count); - + tiler_postfix.offset_start = vertex_postfix.offset_start = min_index + info->index_bias; + tiler_prefix.offset_bias_correction = -min_index; + tiler_prefix.index_count = MALI_POSITIVE(info->count); draw_flags |= panfrost_translate_index_size(info->index_size); } else { - /* Index count == vertex count, if no indexing is applied, as - * if it is internally indexed in the expected order */ - - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.offset_bias_correction = 0; - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.index_count = MALI_POSITIVE(ctx->vertex_count); - - /* Reverse index state */ - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.indices = (mali_ptr) 0; + vertex_count = ctx->vertex_count; + tiler_postfix.offset_start = vertex_postfix.offset_start = info->start; + tiler_prefix.index_count = MALI_POSITIVE(ctx->vertex_count); } - /* Dispatch "compute jobs" for the vertex/tiler pair as (1, - * vertex_count, 1) */ - - panfrost_pack_work_groups_fused( - &ctx->payloads[PIPE_SHADER_VERTEX].prefix, - &ctx->payloads[PIPE_SHADER_FRAGMENT].prefix, - 1, vertex_count, info->instance_count, - 1, 1, 1); - - ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.unknown_draw = draw_flags; + tiler_prefix.unknown_draw = draw_flags; + ctx->offset_start = vertex_postfix.offset_start; /* Encode the padded vertex count */ @@ -1074,32 +377,60 @@ panfrost_draw_vbo( unsigned shift = __builtin_ctz(ctx->padded_count); unsigned k = ctx->padded_count >> (shift + 1); - ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = shift; - ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = shift; - - ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = k; - ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = k; + tiler_postfix.instance_shift = vertex_postfix.instance_shift = shift; + tiler_postfix.instance_odd = vertex_postfix.instance_odd = k; } else { ctx->padded_count = vertex_count; - - /* Reset instancing state */ - ctx->payloads[PIPE_SHADER_VERTEX].instance_shift = 0; - ctx->payloads[PIPE_SHADER_VERTEX].instance_odd = 0; - ctx->payloads[PIPE_SHADER_FRAGMENT].instance_shift = 0; - ctx->payloads[PIPE_SHADER_FRAGMENT].instance_odd = 0; } - /* Fire off the draw itself */ - panfrost_queue_draw(ctx); + panfrost_statistics_record(ctx, info); - /* Increment transform feedback offsets */ + panfrost_pack_work_groups_fused(&vertex_prefix, &tiler_prefix, + 1, vertex_count, info->instance_count, + 1, 1, 1); + + /* Emit all sort of descriptors. 
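+         * Everything here is transient, rebuilt per draw: attributes,
+         * varyings, samplers, textures, uniform buffers and the viewport,
+         * with the resulting GPU pointers patched into the vertex/tiler
+         * postfixes below.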
*/ + mali_ptr push_vert = 0, push_frag = 0, attribs = 0; + mali_ptr varyings = 0, vs_vary = 0, fs_vary = 0, pos = 0, psiz = 0; + + vertex_postfix.attribute_meta = panfrost_emit_vertex_data(batch, &attribs); + vertex_postfix.attributes = attribs; + panfrost_emit_varying_descriptor(batch, + ctx->padded_count * + ctx->instance_count, + &vs_vary, &fs_vary, &varyings, + &pos, &psiz); + vertex_postfix.varyings = varyings; + tiler_postfix.varyings = varyings; + vertex_postfix.varying_meta = vs_vary; + tiler_postfix.varying_meta = fs_vary; + tiler_postfix.position_varying = pos; + vertex_postfix.sampler_descriptor = panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_VERTEX); + tiler_postfix.sampler_descriptor = panfrost_emit_sampler_descriptors(batch, PIPE_SHADER_FRAGMENT); + vertex_postfix.textures = panfrost_emit_texture_descriptors(batch, PIPE_SHADER_VERTEX); + tiler_postfix.textures = panfrost_emit_texture_descriptors(batch, PIPE_SHADER_FRAGMENT); + vertex_postfix.uniform_buffers = panfrost_emit_const_buf(batch, PIPE_SHADER_VERTEX, &push_vert); + tiler_postfix.uniform_buffers = panfrost_emit_const_buf(batch, PIPE_SHADER_FRAGMENT, &push_frag); + vertex_postfix.uniforms = push_vert; + tiler_postfix.uniforms = push_frag; + tiler_postfix.viewport = panfrost_emit_viewport(batch); + + vertex_postfix.shader = panfrost_emit_compute_shader_meta(batch, PIPE_SHADER_VERTEX); + tiler_postfix.shader = panfrost_emit_frag_shader_meta(batch); + + primitive_size.pointer = psiz; + panfrost_vt_update_primitive_size(ctx, &tiler_prefix, &primitive_size); - for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) { - unsigned output_count = u_stream_outputs_for_vertices( - ctx->active_prim, ctx->vertex_count); + /* Fire off the draw itself */ + panfrost_emit_vertex_tiler_jobs(batch, &vertex_prefix, &vertex_postfix, + &tiler_prefix, &tiler_postfix, + &primitive_size); - ctx->streamout.offsets[i] += output_count; - } + /* Adjust the batch stack size based on the new shader stack sizes. */ + panfrost_batch_adjust_stack_size(batch); + + /* Increment transform feedback offsets */ + panfrost_update_streamout_offsets(ctx); } /* CSO state */ @@ -1119,17 +450,8 @@ panfrost_create_rasterizer_state( so->base = *cso; - /* Bitmask, unknown meaning of the start value. 0x105 on 32-bit T6XX */ - so->tiler_gl_enables = 0x7; - - if (cso->front_ccw) - so->tiler_gl_enables |= MALI_FRONT_CCW_TOP; - - if (cso->cull_face & PIPE_FACE_FRONT) - so->tiler_gl_enables |= MALI_CULL_FACE_FRONT; - - if (cso->cull_face & PIPE_FACE_BACK) - so->tiler_gl_enables |= MALI_CULL_FACE_BACK; + /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ + assert(cso->offset_clamp == 0.0); return so; } @@ -1146,17 +468,6 @@ panfrost_bind_rasterizer_state( if (!hwcso) return; - ctx->fragment_shader_core.depth_units = ctx->rasterizer->base.offset_units * 2.0f; - ctx->fragment_shader_core.depth_factor = ctx->rasterizer->base.offset_scale; - - /* Gauranteed with the core GL call, so don't expose ARB_polygon_offset */ - assert(ctx->rasterizer->base.offset_clamp == 0.0); - - /* XXX: Which bit is which? Does this maybe allow offseting not-tri? 
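-         * Both bits were simply keyed to offset_tri below, so whichever is
-         * which never mattered for the state we generated.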
*/ - - SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_A, ctx->rasterizer->base.offset_tri); - SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_DEPTH_RANGE_B, ctx->rasterizer->base.offset_tri); - /* Point sprites are emulated */ struct panfrost_shader_state *variant = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT); @@ -1172,23 +483,35 @@ panfrost_create_vertex_elements_state( const struct pipe_vertex_element *elements) { struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state); + struct panfrost_device *dev = pan_device(pctx->screen); so->num_elements = num_elements; memcpy(so->pipe, elements, sizeof(*elements) * num_elements); for (int i = 0; i < num_elements; ++i) { - so->hw[i].index = i; - enum pipe_format fmt = elements[i].src_format; const struct util_format_description *desc = util_format_description(fmt); - so->hw[i].unknown1 = 0x2; - so->hw[i].swizzle = panfrost_get_default_swizzle(desc->nr_channels); + unsigned swizzle = 0; + if (dev->quirks & HAS_SWIZZLES) + swizzle = panfrost_translate_swizzle_4(desc->swizzle); + else + swizzle = panfrost_bifrost_swizzle(desc->nr_channels); + + enum mali_format hw_format = panfrost_pipe_format_table[desc->format].hw; + so->formats[i] = (hw_format << 12) | swizzle; + assert(hw_format); + } - so->hw[i].format = panfrost_find_format(desc); + /* Let's also prepare vertex builtins */ + if (dev->quirks & HAS_SWIZZLES) + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); + else + so->formats[PAN_VERTEX_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1); - /* The field itself should probably be shifted over */ - so->hw[i].src_offset = elements[i].src_offset; - } + if (dev->quirks & HAS_SWIZZLES) + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_get_default_swizzle(1); + else + so->formats[PAN_INSTANCE_ID] = (MALI_R32UI << 12) | panfrost_bifrost_swizzle(1); return so; } @@ -1209,6 +532,7 @@ panfrost_create_shader_state( enum pipe_shader_type stage) { struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants); + struct panfrost_device *dev = pan_device(pctx->screen); so->base = *cso; /* Token deep copy to prevent memory corruption */ @@ -1217,18 +541,16 @@ panfrost_create_shader_state( so->base.tokens = tgsi_dup_tokens(so->base.tokens); /* Precompile for shader-db if we need to */ - if (unlikely((pan_debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { + if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) { struct panfrost_context *ctx = pan_context(pctx); - struct mali_shader_meta meta; - struct panfrost_shader_state state; + struct panfrost_shader_state state = { 0 }; uint64_t outputs_written; - panfrost_shader_compile(ctx, &meta, - PIPE_SHADER_IR_NIR, - so->base.ir.nir, - tgsi_processor_to_shader_stage(stage), &state, - &outputs_written); + panfrost_shader_compile(ctx, PIPE_SHADER_IR_NIR, + so->base.ir.nir, + tgsi_processor_to_shader_stage(stage), + &state, &outputs_written); } return so; @@ -1242,16 +564,21 @@ panfrost_delete_shader_state( struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so; if (cso->base.type == PIPE_SHADER_IR_TGSI) { - DBG("Deleting TGSI shader leaks duplicated tokens\n"); + /* TODO: leaks TGSI tokens! 
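+                 * They were deep-copied with tgsi_dup_tokens() in
+                 * panfrost_create_shader_state(); a free((void *) cso->base.tokens)
+                 * would belong here.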
*/ } for (unsigned i = 0; i < cso->variant_count; ++i) { struct panfrost_shader_state *shader_state = &cso->variants[i]; panfrost_bo_unreference(shader_state->bo); + + if (shader_state->upload.rsrc) + pipe_resource_reference(&shader_state->upload.rsrc, NULL); + shader_state->bo = NULL; } free(cso->variants); + free(so); } @@ -1261,58 +588,14 @@ panfrost_create_sampler_state( const struct pipe_sampler_state *cso) { struct panfrost_sampler_state *so = CALLOC_STRUCT(panfrost_sampler_state); - so->base = *cso; - - /* sampler_state corresponds to mali_sampler_descriptor, which we can generate entirely here */ - - bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST; - bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST; - bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR; - - unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0; - unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0; - unsigned mip_filter = mip_linear ? - (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0; - unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0; - - struct mali_sampler_descriptor sampler_descriptor = { - .filter_mode = min_filter | mag_filter | mip_filter | normalized, - .wrap_s = translate_tex_wrap(cso->wrap_s), - .wrap_t = translate_tex_wrap(cso->wrap_t), - .wrap_r = translate_tex_wrap(cso->wrap_r), - .compare_func = panfrost_flip_compare_func( - panfrost_translate_compare_func( - cso->compare_func)), - .border_color = { - cso->border_color.f[0], - cso->border_color.f[1], - cso->border_color.f[2], - cso->border_color.f[3] - }, - .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */ - .max_lod = FIXED_16(cso->max_lod, false), - .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */ - .seamless_cube_map = cso->seamless_cube_map, - }; - - /* If necessary, we disable mipmapping in the sampler descriptor by - * clamping the LOD as tight as possible (from 0 to epsilon, - * essentially -- remember these are fixed point numbers, so - * epsilon=1/256) */ + struct panfrost_device *device = pan_device(pctx->screen); - if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) { - sampler_descriptor.max_lod = sampler_descriptor.min_lod; - - /* Enforce that there is something in the middle by adding epsilon*/ - - if (sampler_descriptor.min_lod == sampler_descriptor.max_lod) - sampler_descriptor.max_lod++; - - /* Sanity check */ - assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod); - } + so->base = *cso; - so->hw = sampler_descriptor; + if (device->quirks & IS_BIFROST) + panfrost_sampler_desc_init_bifrost(cso, (struct mali_bifrost_sampler_packed *) &so->hw); + else + panfrost_sampler_desc_init(cso, &so->hw); return so; } @@ -1339,28 +622,36 @@ panfrost_variant_matches( struct panfrost_shader_state *variant, enum pipe_shader_type type) { + struct panfrost_device *dev = pan_device(ctx->base.screen); struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base; - struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha; bool is_fragment = (type == PIPE_SHADER_FRAGMENT); - if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) { - /* Make sure enable state is at least the same */ - if (alpha->enabled != variant->alpha_state.enabled) { - return false; - } + if (variant->outputs_read) { + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; - /* Check that the contents of the test are the same */ - bool same_func = alpha->func == variant->alpha_state.func; - bool same_ref = alpha->ref_value == 
variant->alpha_state.ref_value; + unsigned i; + BITSET_FOREACH_SET(i, &variant->outputs_read, 8) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; - if (!(same_func && same_ref)) { - return false; + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; + + const struct util_format_description *desc = + util_format_description(fmt); + + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + if (variant->rt_formats[i] != fmt) + return false; } } + /* Point sprites TODO on bifrost, always pass */ if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable | - variant->point_sprite_mask)) { + variant->point_sprite_mask) + && !(dev->quirks & IS_BIFROST)) { /* Ensure the same varyings are turned to point sprites */ if (rasterizer->sprite_coord_enable != variant->point_sprite_mask) return false; @@ -1424,6 +715,7 @@ panfrost_bind_shader_state( enum pipe_shader_type type) { struct panfrost_context *ctx = pan_context(pctx); + struct panfrost_device *dev = pan_device(ctx->base.screen); ctx->shader[type] = hwcso; if (!hwcso) return; @@ -1467,18 +759,30 @@ panfrost_bind_shader_state( &variants->variants[variant]; if (type == PIPE_SHADER_FRAGMENT) { - v->alpha_state = ctx->depth_stencil->alpha; + struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer; + for (unsigned i = 0; i < fb->nr_cbufs; ++i) { + enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM; + + if ((fb->nr_cbufs > i) && fb->cbufs[i]) + fmt = fb->cbufs[i]->format; - if (ctx->rasterizer) { + const struct util_format_description *desc = + util_format_description(fmt); + + if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE) + fmt = PIPE_FORMAT_NONE; + + v->rt_formats[i] = fmt; + } + + /* Point sprites are TODO on Bifrost */ + if (ctx->rasterizer && !(dev->quirks & IS_BIFROST)) { v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable; v->point_sprite_upper_left = ctx->rasterizer->base.sprite_coord_mode == PIPE_SPRITE_COORD_UPPER_LEFT; } } - - variants->variants[variant].tripipe = calloc(1, sizeof(struct mali_shader_meta)); - } /* Select this variant */ @@ -1492,12 +796,12 @@ panfrost_bind_shader_state( if (!shader_state->compiled) { uint64_t outputs_written = 0; - panfrost_shader_compile(ctx, shader_state->tripipe, - variants->base.type, - variants->base.type == PIPE_SHADER_IR_NIR ? - variants->base.ir.nir : - variants->base.tokens, - tgsi_processor_to_shader_stage(type), shader_state, + panfrost_shader_compile(ctx, variants->base.type, + variants->base.type == PIPE_SHADER_IR_NIR ? 
+ variants->base.ir.nir : + variants->base.tokens, + tgsi_processor_to_shader_stage(type), + shader_state, &outputs_written); shader_state->compiled = true; @@ -1579,29 +883,129 @@ panfrost_set_stencil_ref( ctx->stencil_ref = *ref; } -static enum mali_texture_type -panfrost_translate_texture_type(enum pipe_texture_target t) { - switch (t) - { - case PIPE_BUFFER: - case PIPE_TEXTURE_1D: - case PIPE_TEXTURE_1D_ARRAY: - return MALI_TEX_1D; +void +panfrost_create_sampler_view_bo(struct panfrost_sampler_view *so, + struct pipe_context *pctx, + struct pipe_resource *texture) +{ + struct panfrost_device *device = pan_device(pctx->screen); + struct panfrost_resource *prsrc = (struct panfrost_resource *)texture; + enum pipe_format format = so->base.format; + assert(prsrc->bo); - case PIPE_TEXTURE_2D: - case PIPE_TEXTURE_2D_ARRAY: - case PIPE_TEXTURE_RECT: - return MALI_TEX_2D; + /* Format to access the stencil portion of a Z32_S8 texture */ + if (format == PIPE_FORMAT_X32_S8X24_UINT) { + assert(prsrc->separate_stencil); + texture = &prsrc->separate_stencil->base; + prsrc = (struct panfrost_resource *)texture; + format = texture->format; + } - case PIPE_TEXTURE_3D: - return MALI_TEX_3D; + const struct util_format_description *desc = util_format_description(format); - case PIPE_TEXTURE_CUBE: - case PIPE_TEXTURE_CUBE_ARRAY: - return MALI_TEX_CUBE; + bool fake_rgtc = !panfrost_supports_compressed_format(device, MALI_BC4_UNORM); - default: - unreachable("Unknown target"); + if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC && fake_rgtc) { + if (desc->is_snorm) + format = PIPE_FORMAT_R8G8B8A8_SNORM; + else + format = PIPE_FORMAT_R8G8B8A8_UNORM; + desc = util_format_description(format); + } + + so->texture_bo = prsrc->bo->gpu; + so->modifier = prsrc->modifier; + + unsigned char user_swizzle[4] = { + so->base.swizzle_r, + so->base.swizzle_g, + so->base.swizzle_b, + so->base.swizzle_a + }; + + /* In the hardware, array_size refers specifically to array textures, + * whereas in Gallium, it also covers cubemaps */ + + unsigned array_size = texture->array_size; + unsigned depth = texture->depth0; + + if (so->base.target == PIPE_TEXTURE_CUBE) { + /* TODO: Cubemap arrays */ + assert(array_size == 6); + array_size /= 6; + } + + /* MSAA only supported for 2D textures (and 2D texture arrays via an + * extension currently unimplemented */ + + if (so->base.target == PIPE_TEXTURE_2D) { + assert(depth == 1); + depth = texture->nr_samples; + } else { + /* MSAA only supported for 2D textures */ + assert(texture->nr_samples <= 1); + } + + enum mali_texture_dimension type = + panfrost_translate_texture_dimension(so->base.target); + + if (device->quirks & IS_BIFROST) { + unsigned char composed_swizzle[4]; + util_format_compose_swizzles(desc->swizzle, user_swizzle, composed_swizzle); + + unsigned size = panfrost_estimate_texture_payload_size( + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); + + so->bo = panfrost_bo_create(device, size, 0); + + panfrost_new_texture_bifrost( + &so->bifrost_descriptor, + texture->width0, texture->height0, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + prsrc->cubemap_stride, + panfrost_translate_swizzle_4(composed_swizzle), + prsrc->bo->gpu, + prsrc->slices, + so->bo); + } else { + unsigned size = panfrost_estimate_texture_payload_size( 
+ so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + type, prsrc->modifier); + size += MALI_MIDGARD_TEXTURE_LENGTH; + + so->bo = panfrost_bo_create(device, size, 0); + + panfrost_new_texture( + so->bo->cpu, + texture->width0, texture->height0, + depth, array_size, + format, + type, prsrc->modifier, + so->base.u.tex.first_level, + so->base.u.tex.last_level, + so->base.u.tex.first_layer, + so->base.u.tex.last_layer, + texture->nr_samples, + prsrc->cubemap_stride, + panfrost_translate_swizzle_4(user_swizzle), + prsrc->bo->gpu, + prsrc->slices); } } @@ -1611,63 +1015,16 @@ panfrost_create_sampler_view( struct pipe_resource *texture, const struct pipe_sampler_view *template) { - struct panfrost_screen *screen = pan_screen(pctx->screen); struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); pipe_reference(NULL, &texture->reference); - struct panfrost_resource *prsrc = (struct panfrost_resource *) texture; - assert(prsrc->bo); - so->base = *template; so->base.texture = texture; so->base.reference.count = 1; so->base.context = pctx; - unsigned char user_swizzle[4] = { - template->swizzle_r, - template->swizzle_g, - template->swizzle_b, - template->swizzle_a - }; - - /* In the hardware, array_size refers specifically to array textures, - * whereas in Gallium, it also covers cubemaps */ - - unsigned array_size = texture->array_size; - - if (template->target == PIPE_TEXTURE_CUBE) { - /* TODO: Cubemap arrays */ - assert(array_size == 6); - array_size /= 6; - } - - enum mali_texture_type type = - panfrost_translate_texture_type(template->target); - - unsigned size = panfrost_estimate_texture_size( - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, - type, prsrc->layout); - - so->bo = panfrost_bo_create(screen, size, 0); - - panfrost_new_texture( - so->bo->cpu, - texture->width0, texture->height0, - texture->depth0, array_size, - template->format, - type, prsrc->layout, - template->u.tex.first_level, - template->u.tex.last_level, - template->u.tex.first_layer, - template->u.tex.last_layer, - prsrc->cubemap_stride, - panfrost_translate_swizzle_4(user_swizzle), - prsrc->bo->gpu, - prsrc->slices); + panfrost_create_sampler_view_bo(so, pctx, texture); return (struct pipe_sampler_view *) so; } @@ -1725,94 +1082,85 @@ panfrost_set_shader_buffers( buffers, start, count); } -/* Hints that a framebuffer should use AFBC where possible */ - -static void -panfrost_hint_afbc( - struct panfrost_screen *screen, - const struct pipe_framebuffer_state *fb) -{ - /* AFBC implemenation incomplete; hide it */ - if (!(pan_debug & PAN_DBG_AFBC)) return; - - /* Hint AFBC to the resources bound to each color buffer */ - - for (unsigned i = 0; i < fb->nr_cbufs; ++i) { - struct pipe_surface *surf = fb->cbufs[i]; - struct panfrost_resource *rsrc = pan_resource(surf->texture); - panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1); - } - - /* Also hint it to the depth buffer */ - - if (fb->zsbuf) { - struct panfrost_resource *rsrc = pan_resource(fb->zsbuf->texture); - panfrost_resource_hint_layout(screen, rsrc, MALI_TEXTURE_AFBC, 1); - } -} - static void panfrost_set_framebuffer_state(struct pipe_context *pctx, const struct pipe_framebuffer_state *fb) { struct panfrost_context *ctx = pan_context(pctx); - panfrost_hint_afbc(pan_screen(pctx->screen), fb); util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb); ctx->batch = NULL; - 
panfrost_invalidate_frame(ctx); + + /* We may need to generate a new variant if the fragment shader is + * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */ + struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT]; + + if (fs && fs->variant_count && fs->variants[fs->active_variant].outputs_read) + ctx->base.bind_fs_state(&ctx->base, fs); } -static void * -panfrost_create_depth_stencil_state(struct pipe_context *pipe, - const struct pipe_depth_stencil_alpha_state *depth_stencil) +static inline unsigned +pan_pipe_to_stencil_op(enum pipe_stencil_op in) { - return mem_dup(depth_stencil, sizeof(*depth_stencil)); + switch (in) { + case PIPE_STENCIL_OP_KEEP: return MALI_STENCIL_OP_KEEP; + case PIPE_STENCIL_OP_ZERO: return MALI_STENCIL_OP_ZERO; + case PIPE_STENCIL_OP_REPLACE: return MALI_STENCIL_OP_REPLACE; + case PIPE_STENCIL_OP_INCR: return MALI_STENCIL_OP_INCR_SAT; + case PIPE_STENCIL_OP_DECR: return MALI_STENCIL_OP_DECR_SAT; + case PIPE_STENCIL_OP_INCR_WRAP: return MALI_STENCIL_OP_INCR_WRAP; + case PIPE_STENCIL_OP_DECR_WRAP: return MALI_STENCIL_OP_DECR_WRAP; + case PIPE_STENCIL_OP_INVERT: return MALI_STENCIL_OP_INVERT; + default: unreachable("Invalid stencil op"); + } } -static void -panfrost_bind_depth_stencil_state(struct pipe_context *pipe, - void *cso) +static inline void +pan_pipe_to_stencil(const struct pipe_stencil_state *in, void *out) { - struct panfrost_context *ctx = pan_context(pipe); - struct pipe_depth_stencil_alpha_state *depth_stencil = cso; - ctx->depth_stencil = depth_stencil; - - if (!depth_stencil) - return; - - /* Alpha does not exist in the hardware (it's not in ES3), so it's - * emulated in the fragment shader */ - - if (depth_stencil->alpha.enabled) { - /* We need to trigger a new shader (maybe) */ - ctx->base.bind_fs_state(&ctx->base, ctx->shader[PIPE_SHADER_FRAGMENT]); + pan_pack(out, STENCIL, cfg) { + cfg.mask = in->valuemask; + cfg.compare_function = panfrost_translate_compare_func(in->func); + cfg.stencil_fail = pan_pipe_to_stencil_op(in->fail_op); + cfg.depth_fail = pan_pipe_to_stencil_op(in->zfail_op); + cfg.depth_pass = pan_pipe_to_stencil_op(in->zpass_op); } +} - /* Stencil state */ - SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_STENCIL_TEST, depth_stencil->stencil[0].enabled); - - panfrost_make_stencil_state(&depth_stencil->stencil[0], &ctx->fragment_shader_core.stencil_front); - ctx->fragment_shader_core.stencil_mask_front = depth_stencil->stencil[0].writemask; +static void * +panfrost_create_depth_stencil_state(struct pipe_context *pipe, + const struct pipe_depth_stencil_alpha_state *zsa) +{ + struct panfrost_zsa_state *so = CALLOC_STRUCT(panfrost_zsa_state); + so->base = *zsa; - /* If back-stencil is not enabled, use the front values */ - bool back_enab = ctx->depth_stencil->stencil[1].enabled; - unsigned back_index = back_enab ? 
1 : 0; + pan_pipe_to_stencil(&zsa->stencil[0], &so->stencil_front); + so->stencil_mask_front = zsa->stencil[0].writemask; - panfrost_make_stencil_state(&depth_stencil->stencil[back_index], &ctx->fragment_shader_core.stencil_back); - ctx->fragment_shader_core.stencil_mask_back = depth_stencil->stencil[back_index].writemask; + if (zsa->stencil[1].enabled) { + pan_pipe_to_stencil(&zsa->stencil[1], &so->stencil_back); + so->stencil_mask_back = zsa->stencil[1].writemask; + } else { + so->stencil_back = so->stencil_front; + so->stencil_mask_back = so->stencil_mask_front; + } - /* Depth state (TODO: Refactor) */ - SET_BIT(ctx->fragment_shader_core.unknown2_3, MALI_DEPTH_WRITEMASK, - depth_stencil->depth.writemask); + /* Alpha lowered by frontend */ + assert(!zsa->alpha.enabled); - int func = depth_stencil->depth.enabled ? depth_stencil->depth.func : PIPE_FUNC_ALWAYS; + /* TODO: Bounds test should be easy */ + assert(!zsa->depth.bounds_test); - ctx->fragment_shader_core.unknown2_3 &= ~MALI_DEPTH_FUNC_MASK; - ctx->fragment_shader_core.unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(func)); + return so; +} - /* Bounds test not implemented */ - assert(!depth_stencil->depth.bounds_test); +static void +panfrost_bind_depth_stencil_state(struct pipe_context *pipe, + void *cso) +{ + struct panfrost_context *ctx = pan_context(pipe); + struct panfrost_zsa_state *zsa = cso; + ctx->depth_stencil = zsa; } static void @@ -1825,8 +1173,19 @@ static void panfrost_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask) { + struct panfrost_context *ctx = pan_context(pipe); + ctx->sample_mask = sample_mask; +} + +static void +panfrost_set_min_samples(struct pipe_context *pipe, + unsigned min_samples) +{ + struct panfrost_context *ctx = pan_context(pipe); + ctx->min_samples = min_samples; } + static void panfrost_set_clip_state(struct pipe_context *pipe, const struct pipe_clip_state *clip) @@ -1890,6 +1249,7 @@ panfrost_destroy(struct pipe_context *pipe) util_unreference_framebuffer_state(&panfrost->pipe_framebuffer); u_upload_destroy(pipe->stream_uploader); + u_upload_destroy(panfrost->state_uploader); ralloc_free(pipe); } @@ -1933,7 +1293,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) /* Allocate a bo for the query results to be stored */ if (!query->bo) { query->bo = panfrost_bo_create( - pan_screen(ctx->base.screen), + pan_device(ctx->base.screen), sizeof(unsigned), 0); } @@ -1953,7 +1313,7 @@ panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q) break; default: - DBG("Skipping query %u\n", query->type); + /* TODO: timestamp queries, etc? 
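+                 * PIPE_QUERY_TIMESTAMP and friends would land here;
+                 * unsupported types are currently accepted and silently
+                 * ignored rather than reported as an error.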
*/ break; } @@ -1997,8 +1357,8 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_OCCLUSION_COUNTER: case PIPE_QUERY_OCCLUSION_PREDICATE: case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: - /* Flush first */ - panfrost_flush_all_batches(ctx, true); + panfrost_flush_batches_accessing_bo(ctx, query->bo, false); + panfrost_bo_wait(query->bo, INT64_MAX, false); /* Read back the query results */ unsigned *result = (unsigned *) query->bo->cpu; @@ -2014,12 +1374,12 @@ panfrost_get_query_result(struct pipe_context *pipe, case PIPE_QUERY_PRIMITIVES_GENERATED: case PIPE_QUERY_PRIMITIVES_EMITTED: - panfrost_flush_all_batches(ctx, true); + panfrost_flush_all_batches(ctx, 0); vresult->u64 = query->end - query->start; break; default: - DBG("Skipped query get %u\n", query->type); + /* TODO: more queries */ break; } @@ -2086,6 +1446,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) { struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context); struct pipe_context *gallium = (struct pipe_context *) ctx; + struct panfrost_device *dev = pan_device(screen); gallium->screen = screen; @@ -2096,6 +1457,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->flush = panfrost_flush; gallium->clear = panfrost_clear; gallium->draw_vbo = panfrost_draw_vbo; + gallium->texture_barrier = panfrost_texture_barrier; gallium->set_vertex_buffers = panfrost_set_vertex_buffers; gallium->set_constant_buffer = panfrost_set_constant_buffer; @@ -2132,6 +1494,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state; gallium->set_sample_mask = panfrost_set_sample_mask; + gallium->set_min_samples = panfrost_set_min_samples; gallium->set_clip_state = panfrost_set_clip_state; gallium->set_viewport_states = panfrost_set_viewport_states; @@ -2153,13 +1516,21 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) panfrost_blend_context_init(gallium); panfrost_compute_context_init(gallium); - /* XXX: leaks */ gallium->stream_uploader = u_upload_create_default(gallium); gallium->const_uploader = gallium->stream_uploader; - assert(gallium->stream_uploader); - /* Midgard supports ES modes, plus QUADS/QUAD_STRIPS/POLYGON */ - ctx->draw_modes = (1 << (PIPE_PRIM_POLYGON + 1)) - 1; + ctx->state_uploader = u_upload_create(gallium, 4096, + PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_DYNAMIC, 0); + + /* All of our GPUs support ES mode. Midgard supports additionally + * QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */ + + ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1; + + if (!(dev->quirks & IS_BIFROST)) { + ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP); + ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON); + } ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes); @@ -2172,9 +1543,15 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) /* Prepare for render! */ panfrost_batch_init(ctx); - panfrost_emit_vertex_payload(ctx); - panfrost_invalidate_frame(ctx); - panfrost_default_shader_backend(ctx); + + if (!(dev->quirks & IS_BIFROST)) { + for (unsigned c = 0; c < PIPE_MAX_COLOR_BUFS; ++c) + ctx->blit_blend.rt[c].shaders = _mesa_hash_table_u64_create(ctx); + } + + /* By default mask everything on */ + ctx->sample_mask = ~0; + ctx->active_queries = true; return gallium; }
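/* For reference, an illustrative caller-side sketch of the flush/fence
 * path reworked above. Only the helper name is invented: pipe->flush(),
 * fence_finish() and fence_reference() are the standard Gallium hooks
 * that the new syncobj-backed panfrost_fence feeds into. */

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

static void
example_wait_for_gpu(struct pipe_context *pipe)
{
        struct pipe_fence_handle *fence = NULL;

        /* panfrost_flush() creates a single syncobj covering every
         * pending batch and wraps it in a panfrost_fence */
        pipe->flush(pipe, &fence, 0);

        /* Waiting on the returned fence waits on that syncobj */
        pipe->screen->fence_finish(pipe->screen, pipe, fence,
                                   PIPE_TIMEOUT_INFINITE);

        pipe->screen->fence_reference(pipe->screen, &fence, NULL);
}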