X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fpan_context.c;h=de6dd38c5566b8d77ed38bc22e952f0dcb172e13;hb=b670becb1ea00a8aafd30b56521e9e93b10c0e85;hp=cacad77d162e5144e597319233bb15e58430e823;hpb=8c88bd0253063fc51ea15e59a09dd3c238c34463;p=mesa.git diff --git a/src/gallium/drivers/panfrost/pan_context.c b/src/gallium/drivers/panfrost/pan_context.c index cacad77d162..de6dd38c556 100644 --- a/src/gallium/drivers/panfrost/pan_context.c +++ b/src/gallium/drivers/panfrost/pan_context.c @@ -26,7 +26,6 @@ #include #include "pan_context.h" -#include "pan_swizzle.h" #include "pan_format.h" #include "util/macros.h" @@ -46,13 +45,36 @@ #include "pan_blending.h" #include "pan_blend_shaders.h" #include "pan_util.h" - -static int performance_counter_number = 0; -extern const char *pan_counters_base; +#include "pan_tiler.h" /* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */ //#define DRY_RUN +static enum mali_job_type +panfrost_job_type_for_pipe(enum pipe_shader_type type) +{ + switch (type) { + case PIPE_SHADER_VERTEX: + return JOB_TYPE_VERTEX; + + case PIPE_SHADER_FRAGMENT: + /* Note: JOB_TYPE_FRAGMENT is different. + * JOB_TYPE_FRAGMENT actually executes the + * fragment shader, but JOB_TYPE_TILER is how you + * specify it*/ + return JOB_TYPE_TILER; + + case PIPE_SHADER_GEOMETRY: + return JOB_TYPE_GEOMETRY; + + case PIPE_SHADER_COMPUTE: + return JOB_TYPE_COMPUTE; + + default: + unreachable("Unsupported shader stage"); + } +} + static void panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc) { @@ -64,7 +86,7 @@ panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource /* 8 byte checksum per tile */ rsrc->bo->checksum_stride = tile_w * 8; int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096); - screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0); + panfrost_drm_allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0); rsrc->bo->has_checksum = true; } @@ -82,20 +104,21 @@ panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, i * The formula itself was discovered mostly by manual bruteforce and * aggressive algebraic simplification. 
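  * As a worked example (our own arithmetic, not re-verified against the
  * hardware): for a 1920x1080 framebuffer, ((1920 + 1080) / 3) << 4
  * evaluates to 1000 << 4 = 16000.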
*/ - fb->resolution_check = ((w + h) / 3) << 4; + fb->tiler_resolution_check = ((w + h) / 3) << 4; } struct mali_single_framebuffer -panfrost_emit_sfbd(struct panfrost_context *ctx) +panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count) { struct mali_single_framebuffer framebuffer = { .unknown2 = 0x1f, .format = 0x30000000, .clear_flags = 0x1000, .unknown_address_0 = ctx->scratchpad.gpu, - .unknown_address_1 = ctx->misc_0.gpu, - .unknown_address_2 = ctx->misc_0.gpu + 40960, - .tiler_flags = 0xf0, + .tiler_polygon_list = ctx->tiler_polygon_list.gpu, + .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960, + .tiler_hierarchy_mask = 0xF0, + .tiler_flags = 0x0, .tiler_heap_free = ctx->tiler_heap.gpu, .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size, }; @@ -106,36 +129,16 @@ panfrost_emit_sfbd(struct panfrost_context *ctx) } struct bifrost_framebuffer -panfrost_emit_mfbd(struct panfrost_context *ctx) +panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count) { + unsigned width = ctx->pipe_framebuffer.width; + unsigned height = ctx->pipe_framebuffer.height; + struct bifrost_framebuffer framebuffer = { - /* It is not yet clear what tiler_meta means or how it's - * calculated, but we can tell the lower 32-bits are a - * (monotonically increasing?) function of tile count and - * geometry complexity; I suspect it defines a memory size of - * some kind? for the tiler. It's really unclear at the - * moment... but to add to the confusion, the hardware is happy - * enough to accept a zero in this field, so we don't even have - * to worry about it right now. - * - * The byte (just after the 32-bit mark) is much more - * interesting. The higher nibble I've only ever seen as 0xF, - * but the lower one I've seen as 0x0 or 0xF, and it's not - * obvious what the difference is. But what -is- obvious is - * that when the lower nibble is zero, performance is severely - * degraded compared to when the lower nibble is set. - * Evidently, that nibble enables some sort of fast path, - * perhaps relating to caching or tile flush? Regardless, at - * this point there's no clear reason not to set it, aside from - * substantially increased memory requirements (of the misc_0 - * buffer) */ - - .tiler_meta = ((uint64_t) 0xff << 32) | 0x0, - - .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width), - .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height), - .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width), - .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height), + .width1 = MALI_POSITIVE(width), + .height1 = MALI_POSITIVE(height), + .width2 = MALI_POSITIVE(width), + .height2 = MALI_POSITIVE(height), .unk1 = 0x1080, @@ -145,27 +148,53 @@ panfrost_emit_mfbd(struct panfrost_context *ctx) .unknown2 = 0x1f, - /* Corresponds to unknown_address_X of SFBD */ .scratchpad = ctx->scratchpad.gpu, - .tiler_scratch_start = ctx->misc_0.gpu, - - /* The constant added here is, like the lower word of - * tiler_meta, (loosely) another product of framebuffer size - * and geometry complexity. It must be sufficiently large for - * the tiler_meta fast path to work; if it's too small, there - * will be DATA_INVALID_FAULTs. Conversely, it must be less - * than the total size of misc_0, or else there's no room. It's - * possible this constant configures a partition between two - * parts of misc_0? 
We haven't investigated the functionality, - * as these buffers are internally used by the hardware - * (presumably by the tiler) but not seemingly touched by the driver - */ + }; + + framebuffer.tiler_hierarchy_mask = + panfrost_choose_hierarchy_mask(width, height, vertex_count); + + /* Compute the polygon header size and use that to offset the body */ + + unsigned header_size = panfrost_tiler_header_size( + width, height, framebuffer.tiler_hierarchy_mask); + + unsigned body_size = panfrost_tiler_body_size( + width, height, framebuffer.tiler_hierarchy_mask); + + /* Sanity check */ + + unsigned total_size = header_size + body_size; + + if (framebuffer.tiler_hierarchy_mask) { + assert(ctx->tiler_polygon_list.size >= total_size); + + /* Specify allocated tiler structures */ + framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu; + + /* Allow the entire tiler heap */ + framebuffer.tiler_heap_start = ctx->tiler_heap.gpu; + framebuffer.tiler_heap_end = + ctx->tiler_heap.gpu + ctx->tiler_heap.size; + } else { + /* The tiler is disabled, so don't allow the tiler heap */ + framebuffer.tiler_heap_start = ctx->tiler_heap.gpu; + framebuffer.tiler_heap_end = framebuffer.tiler_heap_start; + + /* Use a dummy polygon list */ + framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu; + + /* Also, set a "tiler disabled?" flag? */ + framebuffer.tiler_hierarchy_mask |= 0x1000; + } + + framebuffer.tiler_polygon_list_body = + framebuffer.tiler_polygon_list + header_size; + + framebuffer.tiler_polygon_list_size = + header_size + body_size; - .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000, - .tiler_heap_start = ctx->tiler_heap.gpu, - .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size, - }; return framebuffer; } @@ -188,40 +217,6 @@ panfrost_is_scanout(struct panfrost_context *ctx) ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED; } -static uint32_t -pan_pack_color(const union pipe_color_union *color, enum pipe_format format) -{ - /* Alpha magicked to 1.0 if there is no alpha */ - - bool has_alpha = util_format_has_alpha(format); - float clear_alpha = has_alpha ? color->f[3] : 1.0f; - - /* Packed color depends on the framebuffer format */ - - const struct util_format_description *desc = - util_format_description(format); - - if (util_format_is_rgba8_variant(desc)) { - return (float_to_ubyte(clear_alpha) << 24) | - (float_to_ubyte(color->f[2]) << 16) | - (float_to_ubyte(color->f[1]) << 8) | - (float_to_ubyte(color->f[0]) << 0); - } else if (format == PIPE_FORMAT_B5G6R5_UNORM) { - /* First, we convert the components to R5, G6, B5 separately */ - unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0; - unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0; - unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0; - - /* Then we pack into a sparse u32. TODO: Why these shifts? 
 */
-                return (b5 << 25) | (g6 << 14) | (r5 << 5);
-        } else {
-                /* Unknown format */
-                assert(0);
-        }
-
-        return 0;
-}
-
 static void
 panfrost_clear(
         struct pipe_context *pipe,
@@ -232,20 +227,7 @@ panfrost_clear(
         struct panfrost_context *ctx = pan_context(pipe);
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
-        if (buffers & PIPE_CLEAR_COLOR) {
-                enum pipe_format format = ctx->pipe_framebuffer.cbufs[0]->format;
-                job->clear_color = pan_pack_color(color, format);
-        }
-
-        if (buffers & PIPE_CLEAR_DEPTH) {
-                job->clear_depth = depth;
-        }
-
-        if (buffers & PIPE_CLEAR_STENCIL) {
-                job->clear_stencil = stencil;
-        }
-
-        job->clear |= buffers;
+        panfrost_job_clear(ctx, job, buffers, color, depth, stencil);
 }
 
 static mali_ptr
@@ -285,9 +267,9 @@ panfrost_invalidate_frame(struct panfrost_context *ctx)
         ctx->cmdstream_i = 0;
 
         if (ctx->require_sfbd)
-                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
         else
-                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
 
         /* Reset varyings allocated */
         ctx->varying_height = 0;
@@ -306,11 +288,6 @@ panfrost_invalidate_frame(struct panfrost_context *ctx)
 
         /* XXX */
         ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
-
-        /* Reset job counters */
-        ctx->draw_count = 0;
-        ctx->vertex_job_count = 0;
-        ctx->tiler_job_count = 0;
 }
 
 /* In practice, every field of these payloads should be configurable
@@ -537,15 +514,6 @@ panfrost_default_shader_backend(struct panfrost_context *ctx)
         memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
 }
 
-static void
-panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
-{
-        if (first->job_descriptor_size)
-                first->next_job_64 = (u64) (uintptr_t) next;
-        else
-                first->next_job_32 = (u32) (uintptr_t) next;
-}
-
 /* Generates a vertex/tiler job. This is, in some sense, the heart of the
 * graphics command stream. It should be called once per draw, according to
 * presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
@@ -555,12 +523,8 @@ panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
 struct panfrost_transfer
 panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
 {
-        /* Each draw call corresponds to two jobs, and the set-value job is first */
-        int draw_job_index = 1 + (2 * ctx->draw_count) + 1;
-
         struct mali_job_descriptor_header job = {
                 .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
-                .job_index = draw_job_index + (is_tiler ?
1 : 0), #ifdef __LP64__ .job_descriptor_size = 1, #endif @@ -577,65 +541,11 @@ panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler) #endif struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload)); - if (is_tiler) { - /* Tiler jobs depend on vertex jobs */ - - job.job_dependency_index_1 = draw_job_index; - - /* Tiler jobs also depend on the previous tiler job */ - - if (ctx->draw_count) { - job.job_dependency_index_2 = draw_job_index - 1; - /* Previous tiler job points to this tiler job */ - panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->draw_count - 1], transfer.gpu); - } else { - /* The only vertex job so far points to first tiler job */ - panfrost_link_job_pair(ctx->u_vertex_jobs[0], transfer.gpu); - } - } else { - if (ctx->draw_count) { - /* Previous vertex job points to this vertex job */ - panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->draw_count - 1], transfer.gpu); - - /* Last vertex job points to first tiler job */ - panfrost_link_job_pair(&job, ctx->tiler_jobs[0]); - } else { - /* Have the first vertex job depend on the set value job */ - job.job_dependency_index_1 = ctx->u_set_value_job->job_index; - panfrost_link_job_pair(ctx->u_set_value_job, transfer.gpu); - } - } - memcpy(transfer.cpu, &job, sizeof(job)); memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload)); return transfer; } -/* Generates a set value job. It's unclear what exactly this does, why it's - * necessary, and when to call it. */ - -static void -panfrost_set_value_job(struct panfrost_context *ctx) -{ - struct mali_job_descriptor_header job = { - .job_type = JOB_TYPE_SET_VALUE, - .job_descriptor_size = 1, - .job_index = 1, - }; - - struct mali_payload_set_value payload = { - .out = ctx->misc_0.gpu, - .unknown = 0x3, - }; - - struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload)); - memcpy(transfer.cpu, &job, sizeof(job)); - memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload)); - - ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu; - ctx->set_value_job = transfer.gpu; -} - static mali_ptr panfrost_emit_varyings( struct panfrost_context *ctx, @@ -672,6 +582,7 @@ panfrost_emit_varying_descriptor( struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant]; struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant]; + unsigned int num_gen_varyings = 0; /* Allocate the varying descriptor */ @@ -681,6 +592,65 @@ panfrost_emit_varying_descriptor( struct panfrost_transfer trans = panfrost_allocate_transient(ctx, vs_size + fs_size); + /* + * Assign ->src_offset now that we know about all the general purpose + * varyings that will be used by the fragment and vertex shaders. + */ + for (unsigned i = 0; i < vs->tripipe->varying_count; i++) { + /* + * General purpose varyings have ->index set to 0, skip other + * entries. + */ + if (vs->varyings[i].index) + continue; + + vs->varyings[i].src_offset = 16 * (num_gen_varyings++); + } + + for (unsigned i = 0; i < fs->tripipe->varying_count; i++) { + unsigned j; + + /* If we have a point sprite replacement, handle that here. We + * have to translate location first. TODO: Flip y in shader. + * We're already keying ... just time crunch .. */ + + unsigned loc = fs->varyings_loc[i]; + unsigned pnt_loc = + (loc >= VARYING_SLOT_VAR0) ? (loc - VARYING_SLOT_VAR0) : + (loc == VARYING_SLOT_PNTC) ? 
8 : + ~0; + + if (~pnt_loc && fs->point_sprite_mask & (1 << pnt_loc)) { + /* gl_PointCoord index by convention */ + fs->varyings[i].index = 3; + fs->reads_point_coord = true; + + /* Swizzle out the z/w to 0/1 */ + fs->varyings[i].format = MALI_RG16F; + fs->varyings[i].swizzle = + panfrost_get_default_swizzle(2); + + continue; + } + + if (fs->varyings[i].index) + continue; + + /* + * Re-use the VS general purpose varying pos if it exists, + * create a new one otherwise. + */ + for (j = 0; j < vs->tripipe->varying_count; j++) { + if (fs->varyings_loc[i] == vs->varyings_loc[j]) + break; + } + + if (j < vs->tripipe->varying_count) + fs->varyings[i].src_offset = vs->varyings[j].src_offset; + else + fs->varyings[i].src_offset = 16 * (num_gen_varyings++); + } + memcpy(trans.cpu, vs->varyings, vs_size); memcpy(trans.cpu + vs_size, fs->varyings, fs_size); @@ -691,11 +661,8 @@ panfrost_emit_varying_descriptor( union mali_attr varyings[PIPE_MAX_ATTRIBS]; unsigned idx = 0; - /* General varyings -- use the VS's, since those are more likely to be - * accurate on desktop */ - - panfrost_emit_varyings(ctx, &varyings[idx++], - vs->general_varying_stride, invocation_count); + panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16, + invocation_count); /* fp32 vec4 gl_Position */ ctx->payload_tiler.postfix.position_varying = @@ -833,23 +800,27 @@ panfrost_upload_sampler_descriptors(struct panfrost_context *ctx) size_t desc_size = sizeof(struct mali_sampler_descriptor); for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) { - if (!ctx->sampler_count[t]) continue; + mali_ptr upload = 0; - size_t transfer_size = desc_size * ctx->sampler_count[t]; + if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) { + size_t transfer_size = desc_size * ctx->sampler_count[t]; - struct panfrost_transfer transfer = - panfrost_allocate_transient(ctx, transfer_size); + struct panfrost_transfer transfer = + panfrost_allocate_transient(ctx, transfer_size); - struct mali_sampler_descriptor *desc = - (struct mali_sampler_descriptor *) transfer.cpu; + struct mali_sampler_descriptor *desc = + (struct mali_sampler_descriptor *) transfer.cpu; - for (int i = 0; i < ctx->sampler_count[t]; ++i) - desc[i] = ctx->samplers[t][i]->hw; + for (int i = 0; i < ctx->sampler_count[t]; ++i) + desc[i] = ctx->samplers[t][i]->hw; + + upload = transfer.gpu; + } if (t == PIPE_SHADER_FRAGMENT) - ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu; + ctx->payload_tiler.postfix.sampler_descriptor = upload; else if (t == PIPE_SHADER_VERTEX) - ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu; + ctx->payload_vertex.postfix.sampler_descriptor = upload; else assert(0); } @@ -877,21 +848,27 @@ panfrost_upload_tex( if (!view) return (mali_ptr) NULL; - struct pipe_resource *tex_rsrc = view->base.texture; - struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc; + struct pipe_sampler_view *pview = &view->base; + struct panfrost_resource *rsrc = pan_resource(pview->texture); /* Do we interleave an explicit stride with every element? 
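 	 * If so (MALI_TEX_MANUAL_STRIDE), the payload below interleaves a
 	 * stride after each address, which is what the idx bookkeeping
 	 * accounts for.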
*/ bool has_manual_stride = view->hw.format.usage2 & MALI_TEX_MANUAL_STRIDE; + /* For easy access */ + + assert(pview->target != PIPE_BUFFER); + unsigned first_level = pview->u.tex.first_level; + unsigned last_level = pview->u.tex.last_level; + /* Inject the addresses in, interleaving mip levels, cube faces, and * strides in that order */ unsigned idx = 0; - for (unsigned l = 0; l <= tex_rsrc->last_level; ++l) { - for (unsigned f = 0; f < tex_rsrc->array_size; ++f) { + for (unsigned l = first_level; l <= last_level; ++l) { + for (unsigned f = 0; f < pview->texture->array_size; ++f) { view->hw.payload[idx++] = panfrost_get_texture_address(rsrc, l, f); @@ -910,16 +887,17 @@ static void panfrost_upload_texture_descriptors(struct panfrost_context *ctx) { for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) { - /* Shortcircuit */ - if (!ctx->sampler_view_count[t]) continue; + mali_ptr trampoline = 0; - uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS]; + if (ctx->sampler_view_count[t]) { + uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS]; - for (int i = 0; i < ctx->sampler_view_count[t]; ++i) - trampolines[i] = - panfrost_upload_tex(ctx, ctx->sampler_views[t][i]); + for (int i = 0; i < ctx->sampler_view_count[t]; ++i) + trampolines[i] = + panfrost_upload_tex(ctx, ctx->sampler_views[t][i]); - mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]); + trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]); + } if (t == PIPE_SHADER_FRAGMENT) ctx->payload_tiler.postfix.texture_trampoline = trampoline; @@ -930,6 +908,148 @@ panfrost_upload_texture_descriptors(struct panfrost_context *ctx) } } +struct sysval_uniform { + union { + float f[4]; + int32_t i[4]; + uint32_t u[4]; + }; +}; + +static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx, + struct sysval_uniform *uniform) +{ + const struct pipe_viewport_state *vp = &ctx->pipe_viewport; + + uniform->f[0] = vp->scale[0]; + uniform->f[1] = vp->scale[1]; + uniform->f[2] = vp->scale[2]; +} + +static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx, + struct sysval_uniform *uniform) +{ + const struct pipe_viewport_state *vp = &ctx->pipe_viewport; + + uniform->f[0] = vp->translate[0]; + uniform->f[1] = vp->translate[1]; + uniform->f[2] = vp->translate[2]; +} + +static void panfrost_upload_txs_sysval(struct panfrost_context *ctx, + enum pipe_shader_type st, + unsigned int sysvalid, + struct sysval_uniform *uniform) +{ + unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid); + unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid); + bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid); + struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base; + + assert(dim); + uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level); + + if (dim > 1) + uniform->i[1] = u_minify(tex->texture->height0, + tex->u.tex.first_level); + + if (dim > 2) + uniform->i[2] = u_minify(tex->texture->depth0, + tex->u.tex.first_level); + + if (is_array) + uniform->i[dim] = tex->texture->array_size; +} + +static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf, + struct panfrost_shader_state *ss, + enum pipe_shader_type st) +{ + struct sysval_uniform *uniforms = (void *)buf; + + for (unsigned i = 0; i < ss->sysval_count; ++i) { + int sysval = ss->sysval[i]; + + switch (PAN_SYSVAL_TYPE(sysval)) { + case PAN_SYSVAL_VIEWPORT_SCALE: + panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]); + 
break; + case PAN_SYSVAL_VIEWPORT_OFFSET: + panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]); + break; + case PAN_SYSVAL_TEXTURE_SIZE: + panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval), + &uniforms[i]); + break; + default: + assert(0); + } + } +} + +static const void * +panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index) +{ + struct pipe_constant_buffer *cb = &buf->cb[index]; + struct panfrost_resource *rsrc = pan_resource(cb->buffer); + + if (rsrc) + return rsrc->bo->cpu; + else if (cb->user_buffer) + return cb->user_buffer; + else + unreachable("No constant buffer"); +} + +static mali_ptr +panfrost_map_constant_buffer_gpu( + struct panfrost_context *ctx, + struct panfrost_constant_buffer *buf, + unsigned index) +{ + struct pipe_constant_buffer *cb = &buf->cb[index]; + struct panfrost_resource *rsrc = pan_resource(cb->buffer); + + if (rsrc) + return rsrc->bo->gpu; + else if (cb->user_buffer) + return panfrost_upload_transient(ctx, cb->user_buffer, cb->buffer_size); + else + unreachable("No constant buffer"); +} + +/* Compute number of UBOs active (more specifically, compute the highest UBO + * number addressable -- if there are gaps, include them in the count anyway). + * We always include UBO #0 in the count, since we *need* uniforms enabled for + * sysvals. */ + +static unsigned +panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage) +{ + unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1; + return 32 - __builtin_clz(mask); +} + +/* Fixes up a shader state with current state, returning a GPU address to the + * patched shader */ + +static mali_ptr +panfrost_patch_shader_state( + struct panfrost_context *ctx, + struct panfrost_shader_state *ss, + enum pipe_shader_type stage) +{ + ss->tripipe->texture_count = ctx->sampler_view_count[stage]; + ss->tripipe->sampler_count = ctx->sampler_count[stage]; + + ss->tripipe->midgard1.flags = 0x220; + + unsigned ubo_count = panfrost_ubo_count(ctx, stage); + ss->tripipe->midgard1.uniform_buffer_count = ubo_count; + + return ss->tripipe_gpu; +} + /* Go through dirty flags and actualise them in the cmdstream. 
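 * Only state groups whose PAN_DIRTY_* bit is set are re-emitted here;
 * clean state keeps the descriptors from the previous draw.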
 */
 
 void
@@ -951,13 +1071,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
         }
 
-        /* Enable job requirements at draw-time */
-
-        if (msaa)
-                job->requirements |= PAN_REQ_MSAA;
-
-        if (ctx->depth_stencil->depth.writemask)
-                job->requirements |= PAN_REQ_DEPTH_WRITE;
+        panfrost_job_set_requirements(ctx, job);
 
         if (ctx->occlusion_query) {
                 ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
@@ -969,15 +1083,8 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 
                 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
 
-                /* Late shader descriptor assignments */
-
-                vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
-                vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];
-
-                /* Who knows */
-                vs->tripipe->midgard1.unknown1 = 0x2201;
-
-                ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
+                ctx->payload_vertex.postfix._shader_upper =
+                        panfrost_patch_shader_state(ctx, vs, PIPE_SHADER_VERTEX) >> 4;
         }
 
         if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
@@ -999,13 +1106,20 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 assert(ctx->fs);
                 struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];
 
+                panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT);
+
 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
                 COPY(shader);
                 COPY(attribute_count);
                 COPY(varying_count);
+                COPY(texture_count);
+                COPY(sampler_count);
+                COPY(sampler_count);
                 COPY(midgard1.uniform_count);
+                COPY(midgard1.uniform_buffer_count);
                 COPY(midgard1.work_count);
+                COPY(midgard1.flags);
                 COPY(midgard1.unknown2);
 #undef COPY
 
@@ -1015,12 +1129,21 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
 
                 /* Set late due to depending on render state */
-                /* The one at the end seems to mean "1 UBO" */
-                ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;
+                unsigned flags = ctx->fragment_shader_core.midgard1.flags;
+
+                /* Depending on whether it's legal to do so in the given shader, we
+                 * try to enable early-z testing (or forward-pixel kill?)
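+                 * Early-z is skipped for shaders that can discard, since a
+                 * discarded fragment must not have already updated the
+                 * depth buffer.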
*/ - /* Assign texture/sample count right before upload */ - ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT]; - ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT]; + if (!variant->can_discard) + flags |= MALI_EARLY_Z; + + /* Any time texturing is used, derivatives are implicitly + * calculated, so we need to enable helper invocations */ + + if (ctx->sampler_view_count[PIPE_SHADER_FRAGMENT]) + flags |= MALI_HELPER_INVOCATIONS; + + ctx->fragment_shader_core.midgard1.flags = flags; /* Assign the stencil refs late */ ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0]; @@ -1035,9 +1158,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) if (variant->can_discard) { ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD; - ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_NO_ALPHA_TO_COVERAGE; - ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000; - ctx->fragment_shader_core.midgard1.unknown1 = 0x4200; + ctx->fragment_shader_core.midgard1.flags |= 0x400; } /* Check if we're using the default blend descriptor (fast path) */ @@ -1055,6 +1176,8 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) if (ctx->blend->has_blend_shader) { ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader; + } else { + ctx->fragment_shader_core.blend.shader = 0; } if (ctx->require_sfbd) { @@ -1105,8 +1228,23 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) /* TODO: MRT */ for (unsigned i = 0; i < 1; ++i) { + bool is_srgb = + (ctx->pipe_framebuffer.nr_cbufs > i) && + util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format); + rts[i].flags = blend_count; + if (is_srgb) + rts[i].flags |= MALI_BLEND_SRGB; + + /* TODO: sRGB in blend shaders is currently + * unimplemented. Contact me (Alyssa) if you're + * interested in working on this. We have + * native Midgard ops for helping here, but + * they're not well-understood yet. */ + + assert(!(is_srgb && ctx->blend->has_blend_shader)); + if (ctx->blend->has_blend_shader) { rts[i].blend.shader = ctx->blend->blend_shader; } else { @@ -1137,31 +1275,23 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant]; struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs; + /* Uniforms are implicitly UBO #0 */ + bool has_uniforms = buf->enabled_mask & (1 << 0); + /* Allocate room for the sysval and the uniforms */ size_t sys_size = sizeof(float) * 4 * ss->sysval_count; - size_t size = sys_size + buf->size; + size_t uniform_size = has_uniforms ? 
(buf->cb[0].buffer_size) : 0; + size_t size = sys_size + uniform_size; struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size); /* Upload sysvals requested by the shader */ - float *uniforms = (float *) transfer.cpu; - for (unsigned i = 0; i < ss->sysval_count; ++i) { - int sysval = ss->sysval[i]; - - if (sysval == PAN_SYSVAL_VIEWPORT_SCALE) { - uniforms[4*i + 0] = vp->scale[0]; - uniforms[4*i + 1] = vp->scale[1]; - uniforms[4*i + 2] = vp->scale[2]; - } else if (sysval == PAN_SYSVAL_VIEWPORT_OFFSET) { - uniforms[4*i + 0] = vp->translate[0]; - uniforms[4*i + 1] = vp->translate[1]; - uniforms[4*i + 2] = vp->translate[2]; - } else { - assert(0); - } - } + panfrost_upload_sysvals(ctx, transfer.cpu, ss, i); /* Upload uniforms */ - memcpy(transfer.cpu + sys_size, buf->buffer, buf->size); + if (has_uniforms) { + const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0); + memcpy(transfer.cpu + sys_size, cpu, uniform_size); + } int uniform_count = 0; @@ -1182,20 +1312,50 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) unreachable("Invalid shader stage\n"); } - /* Also attach the same buffer as a UBO for extended access */ + /* Next up, attach UBOs. UBO #0 is the uniforms we just + * uploaded */ + + unsigned ubo_count = panfrost_ubo_count(ctx, i); + assert(ubo_count >= 1); + + size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count; + struct mali_uniform_buffer_meta *ubos = calloc(sz, 1); - struct mali_uniform_buffer_meta uniform_buffers[] = { - { - .size = MALI_POSITIVE((2 + uniform_count)), - .ptr = transfer.gpu >> 2, - }, - }; + /* Upload uniforms as a UBO */ + ubos[0].size = MALI_POSITIVE((2 + uniform_count)); + ubos[0].ptr = transfer.gpu >> 2; + + /* The rest are honest-to-goodness UBOs */ + + for (unsigned ubo = 1; ubo < ubo_count; ++ubo) { + size_t sz = buf->cb[ubo].buffer_size; + + bool enabled = buf->enabled_mask & (1 << ubo); + bool empty = sz == 0; + + if (!enabled || empty) { + /* Stub out disabled UBOs to catch accesses */ + + ubos[ubo].size = 0; + ubos[ubo].ptr = 0xDEAD0000; + continue; + } + + mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo); + + unsigned bytes_per_field = 16; + unsigned aligned = ALIGN(sz, bytes_per_field); + unsigned fields = aligned / bytes_per_field; + + ubos[ubo].size = MALI_POSITIVE(fields); + ubos[ubo].ptr = gpu >> 2; + } - mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers)); + mali_ptr ubufs = panfrost_upload_transient(ctx, ubos, sz); postfix->uniforms = transfer.gpu; postfix->uniform_buffers = ubufs; - buf->dirty = 0; + buf->dirty_mask = 0; } /* TODO: Upload the viewport somewhere more appropriate */ @@ -1220,28 +1380,64 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data) }; /* Always scissor to the viewport by default. 
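 * For example, translate = (960, 540) with scale = (960, 540) gives
 * minx/miny = 0 and maxx/maxy = 1920/1080 in the math below.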
 */
-        view.viewport0[0] = (int) (vp->translate[0] - vp->scale[0]);
-        view.viewport1[0] = MALI_POSITIVE((int) (vp->translate[0] + vp->scale[0]));
+        int minx = (int) (vp->translate[0] - vp->scale[0]);
+        int maxx = (int) (vp->translate[0] + vp->scale[0]);
         int miny = (int) (vp->translate[1] - vp->scale[1]);
         int maxy = (int) (vp->translate[1] + vp->scale[1]);
 
-        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
-                view.viewport0[0] = ss->minx;
-                view.viewport1[0] = MALI_POSITIVE(ss->maxx);
+        /* Apply the scissor test */
+
+        if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
+                minx = ss->minx;
+                maxx = ss->maxx;
                 miny = ss->miny;
                 maxy = ss->maxy;
         }
 
         /* Hardware needs the min/max to be strictly ordered, so flip if we
-         * need to */
+         * need to. The viewport transformation in the vertex shader will
+         * handle the negatives if we don't */
+
         if (miny > maxy) {
                 int temp = miny;
                 miny = maxy;
                 maxy = temp;
         }
 
+        if (minx > maxx) {
+                int temp = minx;
+                minx = maxx;
+                maxx = temp;
+        }
+
+        /* Clamp everything positive, just in case */
+
+        maxx = MAX2(0, maxx);
+        maxy = MAX2(0, maxy);
+        minx = MAX2(0, minx);
+        miny = MAX2(0, miny);
+
+        /* Clamp to the framebuffer size as a last check */
+
+        minx = MIN2(ctx->pipe_framebuffer.width, minx);
+        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
+
+        miny = MIN2(ctx->pipe_framebuffer.height, miny);
+        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
+
+        /* Update the job, unless we're doing wallpapering (whose lack of
+         * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
+         * just... be faster :) */
+
+        if (!ctx->wallpaper_batch)
+                panfrost_job_union_scissor(job, minx, miny, maxx, maxy);
+
+        /* Upload */
+
+        view.viewport0[0] = minx;
+        view.viewport1[0] = MALI_POSITIVE(maxx);
+
         view.viewport0[1] = miny;
         view.viewport1[1] = MALI_POSITIVE(maxy);
 
@@ -1258,28 +1454,28 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 static void
 panfrost_queue_draw(struct panfrost_context *ctx)
 {
-        /* TODO: Expand the array? */
-        if (ctx->draw_count >= MAX_DRAW_CALLS) {
-                DBG("Job buffer overflow, ignoring draw\n");
-                assert(0);
-        }
-
         /* Handle dirty flags now */
         panfrost_emit_for_draw(ctx, true);
 
-        /* We need a set_value job before any other draw jobs */
-        if (ctx->draw_count == 0)
-                panfrost_set_value_job(ctx);
+        /* If rasterizer discard is enabled, only submit the vertex job */
+
+        bool rasterizer_discard = ctx->rasterizer
+                && ctx->rasterizer->base.rasterizer_discard;
 
         struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
-        ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
-        ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;
+        struct panfrost_transfer tiler;
+
+        if (!rasterizer_discard)
+                tiler = panfrost_vertex_tiler_job(ctx, true);
 
-        struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
-        ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
-        ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;
+        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
 
-        ctx->draw_count++;
+        if (rasterizer_discard)
+                panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
+        else if (ctx->wallpaper_batch)
+                panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
+        else
+                panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
 }
 
 /* The entire frame is in memory -- send it off to the kernel!
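 * Submission is two-stage: panfrost_job_submit() kicks off the
 * vertex/tiler job chain, while fragment (writeout) work is pushed
 * through the force-flush path below -- right away for readbacks,
 * with up to a frame of pipelining otherwise.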
*/ @@ -1292,40 +1488,21 @@ panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate, struct pipe_context *gallium = (struct pipe_context *) ctx; struct panfrost_screen *screen = pan_screen(gallium->screen); - /* Edge case if screen is cleared and nothing else */ - bool has_draws = ctx->draw_count > 0; - - /* Workaround a bizarre lockup (a hardware errata?) */ - if (!has_draws) - flush_immediate = true; - #ifndef DRY_RUN - bool is_scanout = panfrost_is_scanout(ctx); - screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout); + panfrost_job_submit(ctx, job); /* If visual, we can stall a frame */ if (!flush_immediate) - screen->driver->force_flush_fragment(ctx, fence); + panfrost_drm_force_flush_fragment(ctx, fence); screen->last_fragment_flushed = false; screen->last_job = job; /* If readback, flush now (hurts the pipelined performance) */ if (flush_immediate) - screen->driver->force_flush_fragment(ctx, fence); - - if (screen->driver->dump_counters && pan_counters_base) { - screen->driver->dump_counters(screen); - - char filename[128]; - snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number); - FILE *fp = fopen(filename, "wb"); - fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp); - fclose(fp); - } - + panfrost_drm_force_flush_fragment(ctx, fence); #endif } @@ -1338,35 +1515,21 @@ panfrost_draw_wallpaper(struct pipe_context *pipe) if (ctx->pipe_framebuffer.cbufs[0] == NULL) return; - /* Blit the wallpaper in */ - panfrost_blit_wallpaper(ctx); - - /* We are flushing all queued draws and we know that no more jobs will - * be added until the next frame. - * We also know that the last jobs are the wallpaper jobs, and they - * need to be linked so they execute right after the set_value job. 
- */ - - /* set_value job to wallpaper vertex job */ - panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[ctx->vertex_job_count - 1]); - ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_dependency_index_1 = ctx->u_set_value_job->job_index; + /* Check if the buffer has any content on it worth preserving */ - /* wallpaper vertex job to first vertex job */ - panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 1], ctx->vertex_jobs[0]); - ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index; + struct pipe_surface *surf = ctx->pipe_framebuffer.cbufs[0]; + struct panfrost_resource *rsrc = pan_resource(surf->texture); + unsigned level = surf->u.tex.level; - /* last vertex job to wallpaper tiler job */ - panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 2], ctx->tiler_jobs[ctx->tiler_job_count - 1]); - ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_1 = ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_index; - ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_2 = 0; + if (!rsrc->bo->slices[level].initialized) + return; - /* wallpaper tiler job to first tiler job */ - panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 1], ctx->tiler_jobs[0]); - ctx->u_tiler_jobs[0]->job_dependency_index_1 = ctx->u_vertex_jobs[0]->job_index; - ctx->u_tiler_jobs[0]->job_dependency_index_2 = ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_index; + /* Save the batch */ + struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx); - /* last tiler job to NULL */ - panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 2], 0); + ctx->wallpaper_batch = batch; + panfrost_blit_wallpaper(ctx); + ctx->wallpaper_batch = NULL; } void @@ -1379,13 +1542,15 @@ panfrost_flush( struct panfrost_job *job = panfrost_get_job_for_fbo(ctx); /* Nothing to do! */ - if (!ctx->draw_count && !job->clear) return; + if (!job->last_job.gpu && !job->clear) return; if (!job->clear) panfrost_draw_wallpaper(&ctx->base); - /* Whether to stall the pipeline for immediately correct results */ - bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME; + /* Whether to stall the pipeline for immediately correct results. Since + * pipelined rendering is quite broken right now (to be fixed by the + * panfrost_job refactor, just take the perf hit for correctness) */ + bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true; /* Submit the frame itself */ panfrost_submit_frame(ctx, flush_immediate, fence, job); @@ -1456,6 +1621,19 @@ panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe } } +static bool +panfrost_scissor_culls_everything(struct panfrost_context *ctx) +{ + const struct pipe_scissor_state *ss = &ctx->scissor; + + /* Check if we're scissoring at all */ + + if (!(ss && ctx->rasterizer && ctx->rasterizer->base.scissor)) + return false; + + return (ss->minx == ss->maxx) && (ss->miny == ss->maxy); +} + static void panfrost_draw_vbo( struct pipe_context *pipe, @@ -1463,6 +1641,13 @@ panfrost_draw_vbo( { struct panfrost_context *ctx = pan_context(pipe); + /* First of all, check the scissor to see if anything is drawn at all. 
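+	 * ("Culls everything" here means the scissor is fully degenerate:
+	 * minx == maxx and miny == maxy, per the helper above.)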
+ * If it's not, we drop the draw (mostly a conformance issue; + * well-behaved apps shouldn't hit this) */ + + if (panfrost_scissor_culls_everything(ctx)) + return; + ctx->payload_vertex.draw_start = info->start; ctx->payload_tiler.draw_start = info->start; @@ -1602,6 +1787,14 @@ panfrost_bind_rasterizer_state( ctx->rasterizer = hwcso; ctx->dirty |= PAN_DIRTY_RASTERIZER; + + /* Point sprites are emulated */ + + struct panfrost_shader_state *variant = + ctx->fs ? &ctx->fs->variants[ctx->fs->active_variant] : NULL; + + if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask)) + ctx->base.bind_fs_state(&ctx->base, ctx->fs); } static void * @@ -1737,11 +1930,17 @@ panfrost_bind_sampler_states( } static bool -panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant) +panfrost_variant_matches( + struct panfrost_context *ctx, + struct panfrost_shader_state *variant, + enum pipe_shader_type type) { + struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base; struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha; - if (alpha->enabled || variant->alpha_state.enabled) { + bool is_fragment = (type == PIPE_SHADER_FRAGMENT); + + if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) { /* Make sure enable state is at least the same */ if (alpha->enabled != variant->alpha_state.enabled) { return false; @@ -1755,91 +1954,113 @@ panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_st return false; } } + + if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable | + variant->point_sprite_mask)) { + /* Ensure the same varyings are turned to point sprites */ + if (rasterizer->sprite_coord_enable != variant->point_sprite_mask) + return false; + + /* Ensure the orientation is correct */ + bool upper_left = + rasterizer->sprite_coord_mode == + PIPE_SPRITE_COORD_UPPER_LEFT; + + if (variant->point_sprite_upper_left != upper_left) + return false; + } + /* Otherwise, we're good to go */ return true; } static void -panfrost_bind_fs_state( +panfrost_bind_shader_state( struct pipe_context *pctx, - void *hwcso) + void *hwcso, + enum pipe_shader_type type) { struct panfrost_context *ctx = pan_context(pctx); - ctx->fs = hwcso; + if (type == PIPE_SHADER_FRAGMENT) { + ctx->fs = hwcso; + ctx->dirty |= PAN_DIRTY_FS; + } else { + assert(type == PIPE_SHADER_VERTEX); + ctx->vs = hwcso; + ctx->dirty |= PAN_DIRTY_VS; + } - if (hwcso) { - /* Match the appropriate variant */ + if (!hwcso) return; - signed variant = -1; + /* Match the appropriate variant */ - struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso; + signed variant = -1; + struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso; - for (unsigned i = 0; i < variants->variant_count; ++i) { - if (panfrost_variant_matches(ctx, &variants->variants[i])) { - variant = i; - break; - } + for (unsigned i = 0; i < variants->variant_count; ++i) { + if (panfrost_variant_matches(ctx, &variants->variants[i], type)) { + variant = i; + break; } + } - if (variant == -1) { - /* No variant matched, so create a new one */ - variant = variants->variant_count++; - assert(variants->variant_count < MAX_SHADER_VARIANTS); + if (variant == -1) { + /* No variant matched, so create a new one */ + variant = variants->variant_count++; + assert(variants->variant_count < MAX_SHADER_VARIANTS); - variants->variants[variant].base = hwcso; - variants->variants[variant].alpha_state = ctx->depth_stencil->alpha; + struct 
panfrost_shader_state *v = + &variants->variants[variant]; - /* Allocate the mapped descriptor ahead-of-time. TODO: Use for FS as well as VS */ - struct panfrost_context *ctx = pan_context(pctx); - struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR); + v->base = hwcso; - variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu; - variants->variants[variant].tripipe_gpu = transfer.gpu; + if (type == PIPE_SHADER_FRAGMENT) { + v->alpha_state = ctx->depth_stencil->alpha; + if (ctx->rasterizer) { + v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable; + v->point_sprite_upper_left = + ctx->rasterizer->base.sprite_coord_mode == + PIPE_SPRITE_COORD_UPPER_LEFT; + } } - /* Select this variant */ - variants->active_variant = variant; - - struct panfrost_shader_state *shader_state = &variants->variants[variant]; - assert(panfrost_variant_matches(ctx, shader_state)); + /* Allocate the mapped descriptor ahead-of-time. */ + struct panfrost_context *ctx = pan_context(pctx); + struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR); - /* Now we have a variant selected, so compile and go */ + variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu; + variants->variants[variant].tripipe_gpu = transfer.gpu; - if (!shader_state->compiled) { - panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state); - shader_state->compiled = true; - } } - ctx->dirty |= PAN_DIRTY_FS; -} - -static void -panfrost_bind_vs_state( - struct pipe_context *pctx, - void *hwcso) -{ - struct panfrost_context *ctx = pan_context(pctx); + /* Select this variant */ + variants->active_variant = variant; - ctx->vs = hwcso; + struct panfrost_shader_state *shader_state = &variants->variants[variant]; + assert(panfrost_variant_matches(ctx, shader_state, type)); - if (hwcso) { - if (!ctx->vs->variants[0].compiled) { - ctx->vs->variants[0].base = hwcso; + /* We finally have a variant, so compile it */ - /* TODO DRY from above */ - struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR); - ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu; - ctx->vs->variants[0].tripipe_gpu = transfer.gpu; + if (!shader_state->compiled) { + panfrost_shader_compile(ctx, shader_state->tripipe, NULL, + panfrost_job_type_for_pipe(type), shader_state); - panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]); - ctx->vs->variants[0].compiled = true; - } + shader_state->compiled = true; } +} - ctx->dirty |= PAN_DIRTY_VS; +static void +panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso) +{ + panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX); +} + +static void +panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso) +{ + panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT); } static void @@ -1863,43 +2084,18 @@ panfrost_set_constant_buffer( struct panfrost_context *ctx = pan_context(pctx); struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader]; - size_t sz = buf ? 
buf->buffer_size : 0; - - /* Free previous buffer */ - - pbuf->dirty = true; - pbuf->size = sz; - - if (pbuf->buffer) { - free(pbuf->buffer); - pbuf->buffer = NULL; - } - - /* If unbinding, we're done */ + util_copy_constant_buffer(&pbuf->cb[index], buf); - if (!buf) - return; - - /* Multiple constant buffers not yet supported */ - assert(index == 0); - - const uint8_t *cpu; + unsigned mask = (1 << index); - struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer); - - if (rsrc) { - cpu = rsrc->bo->cpu; - } else if (buf->user_buffer) { - cpu = buf->user_buffer; - } else { - DBG("No constant buffer?\n"); + if (unlikely(!buf)) { + pbuf->enabled_mask &= ~mask; + pbuf->dirty_mask &= ~mask; return; } - /* Copy the constant buffer into the driver context for later upload */ - - pbuf->buffer = malloc(sz); - memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz); + pbuf->enabled_mask |= mask; + pbuf->dirty_mask |= mask; } static void @@ -1914,13 +2110,39 @@ panfrost_set_stencil_ref( ctx->dirty |= PAN_DIRTY_FS; } +static enum mali_texture_type +panfrost_translate_texture_type(enum pipe_texture_target t) +{ + switch (t) { + case PIPE_BUFFER: + case PIPE_TEXTURE_1D: + case PIPE_TEXTURE_1D_ARRAY: + return MALI_TEX_1D; + + case PIPE_TEXTURE_2D: + case PIPE_TEXTURE_2D_ARRAY: + case PIPE_TEXTURE_RECT: + return MALI_TEX_2D; + + case PIPE_TEXTURE_3D: + return MALI_TEX_3D; + + case PIPE_TEXTURE_CUBE: + case PIPE_TEXTURE_CUBE_ARRAY: + return MALI_TEX_CUBE; + + default: + unreachable("Unknown target"); + } +} + static struct pipe_sampler_view * panfrost_create_sampler_view( struct pipe_context *pctx, struct pipe_resource *texture, const struct pipe_sampler_view *template) { - struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view); + struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view); int bytes_per_pixel = util_format_get_blocksize(texture->format); pipe_reference(NULL, &texture->reference); @@ -1992,18 +2214,29 @@ panfrost_create_sampler_view( } } + /* In the hardware, array_size refers specifically to array textures, + * whereas in Gallium, it also covers cubemaps */ + + unsigned array_size = texture->array_size; + + if (texture->target == PIPE_TEXTURE_CUBE) { + /* TODO: Cubemap arrays */ + assert(array_size == 6); + } + struct mali_texture_descriptor texture_descriptor = { - .width = MALI_POSITIVE(texture->width0), - .height = MALI_POSITIVE(texture->height0), - .depth = MALI_POSITIVE(texture->depth0), + .width = MALI_POSITIVE(u_minify(texture->width0, first_level)), + .height = MALI_POSITIVE(u_minify(texture->height0, first_level)), + .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)), + .array_size = MALI_POSITIVE(array_size), /* TODO: Decode */ .format = { .swizzle = panfrost_translate_swizzle_4(desc->swizzle), .format = format, - .usage1 = 0x0, - .is_not_cubemap = texture->target != PIPE_TEXTURE_CUBE, + .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB, + .type = panfrost_translate_texture_type(texture->target), .usage2 = usage2_layout }, @@ -2011,14 +2244,7 @@ panfrost_create_sampler_view( .swizzle = panfrost_translate_swizzle_4(user_swizzle) }; - /* TODO: Other base levels require adjusting dimensions / level numbers / etc */ - assert (template->u.tex.first_level == 0); - - /* Disable mipmapping for now to avoid regressions while automipmapping - * is being implemented. 
TODO: Remove me once automipmaps work */ - - //texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level; - texture_descriptor.nr_mipmap_levels = 0; + //texture_descriptor.nr_mipmap_levels = last_level - first_level; so->hw = texture_descriptor; @@ -2036,7 +2262,13 @@ panfrost_set_sampler_views( assert(start_slot == 0); - ctx->sampler_view_count[shader] = num_views; + unsigned new_nr = 0; + for (unsigned i = 0; i < num_views; ++i) { + if (views[i]) + new_nr = i + 1; + } + + ctx->sampler_view_count[shader] = new_nr; memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *)); ctx->dirty |= PAN_DIRTY_TEXTURES; @@ -2048,7 +2280,7 @@ panfrost_sampler_view_destroy( struct pipe_sampler_view *view) { pipe_resource_reference(&view->texture, NULL); - free(view); + ralloc_free(view); } static void @@ -2057,12 +2289,16 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx, { struct panfrost_context *ctx = pan_context(pctx); - /* Flush when switching away from an FBO, but not if the framebuffer + /* Flush when switching framebuffers, but not if the framebuffer * state is being restored by u_blitter */ - if (!panfrost_is_scanout(ctx) && !ctx->blitter->running) { - panfrost_flush(pctx, NULL, 0); + struct panfrost_job *job = panfrost_get_job_for_fbo(ctx); + bool is_scanout = panfrost_is_scanout(ctx); + bool has_draws = job->last_job.gpu; + + if (!ctx->blitter->running && (!is_scanout || has_draws)) { + panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME); } ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs; @@ -2089,9 +2325,9 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx, continue; if (ctx->require_sfbd) - ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx); + ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0); else - ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx); + ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0); panfrost_attach_vt_framebuffer(ctx); @@ -2116,9 +2352,9 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx, if (zb) { if (ctx->require_sfbd) - ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx); + ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0); else - ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx); + ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0); panfrost_attach_vt_framebuffer(ctx); @@ -2138,7 +2374,7 @@ panfrost_create_blend_state(struct pipe_context *pipe, const struct pipe_blend_state *blend) { struct panfrost_context *ctx = pan_context(pipe); - struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state); + struct panfrost_blend_state *so = rzalloc(ctx, struct panfrost_blend_state); so->base = *blend; /* TODO: The following features are not yet implemented */ @@ -2188,7 +2424,7 @@ panfrost_delete_blend_state(struct pipe_context *pipe, DBG("Deleting blend state leak blend shaders bytecode\n"); } - free(blend); + ralloc_free(blend); } static void @@ -2329,11 +2565,20 @@ panfrost_destroy(struct pipe_context *pipe) if (panfrost->blitter) util_blitter_destroy(panfrost->blitter); - screen->driver->free_slab(screen, &panfrost->scratchpad); - screen->driver->free_slab(screen, &panfrost->varying_mem); - screen->driver->free_slab(screen, &panfrost->shaders); - screen->driver->free_slab(screen, &panfrost->tiler_heap); - screen->driver->free_slab(screen, &panfrost->misc_0); + panfrost_drm_free_slab(screen, &panfrost->scratchpad); + panfrost_drm_free_slab(screen, &panfrost->varying_mem); + panfrost_drm_free_slab(screen, &panfrost->shaders); + panfrost_drm_free_slab(screen, 
&panfrost->tiler_heap); + panfrost_drm_free_slab(screen, &panfrost->tiler_polygon_list); + panfrost_drm_free_slab(screen, &panfrost->tiler_dummy); + + for (int i = 0; i < ARRAY_SIZE(panfrost->transient_pools); ++i) { + struct panfrost_memory_entry *entry; + entry = panfrost->transient_pools[i].entries[0]; + pb_slab_free(&screen->slabs, (struct pb_slab_entry *)entry); + } + + ralloc_free(pipe); } static struct pipe_query * @@ -2341,7 +2586,7 @@ panfrost_create_query(struct pipe_context *pipe, unsigned type, unsigned index) { - struct panfrost_query *q = CALLOC_STRUCT(panfrost_query); + struct panfrost_query *q = rzalloc(pipe, struct panfrost_query); q->type = type; q->index = index; @@ -2352,7 +2597,7 @@ panfrost_create_query(struct pipe_context *pipe, static void panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q) { - FREE(q); + ralloc_free(q); } static boolean @@ -2436,7 +2681,7 @@ panfrost_create_stream_output_target(struct pipe_context *pctx, { struct pipe_stream_output_target *target; - target = CALLOC_STRUCT(pipe_stream_output_target); + target = rzalloc(pctx, struct pipe_stream_output_target); if (!target) return NULL; @@ -2456,7 +2701,7 @@ panfrost_stream_output_target_destroy(struct pipe_context *pctx, struct pipe_stream_output_target *target) { pipe_resource_reference(&target->buffer, NULL); - free(target); + ralloc_free(target); } static void @@ -2484,12 +2729,12 @@ panfrost_setup_hardware(struct panfrost_context *ctx) ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT); } - screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0); - screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0); - screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0); - screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128); - screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128); - + panfrost_drm_allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0); + panfrost_drm_allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0); + panfrost_drm_allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0); + panfrost_drm_allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128); + panfrost_drm_allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128); + panfrost_drm_allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0); } /* New context creation, which also does hardware initialisation since I don't @@ -2498,13 +2743,13 @@ panfrost_setup_hardware(struct panfrost_context *ctx) struct pipe_context * panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) { - struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context); + struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context); struct panfrost_screen *pscreen = pan_screen(screen); memset(ctx, 0, sizeof(*ctx)); struct pipe_context *gallium = (struct pipe_context *) ctx; unsigned gpu_id; - gpu_id = pscreen->driver->query_gpu_version(pscreen); + gpu_id = panfrost_drm_query_gpu_version(pscreen); ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this 
flag means T760 or less */ ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */ @@ -2578,7 +2823,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) panfrost_resource_context_init(gallium); - pscreen->driver->init_context(ctx); + panfrost_drm_init_context(ctx); panfrost_setup_hardware(ctx); @@ -2602,7 +2847,6 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags) panfrost_emit_tiler_payload(ctx); panfrost_invalidate_frame(ctx); panfrost_default_shader_backend(ctx); - panfrost_generate_space_filler_indices(); return gallium; }