#include <errno.h>
#include "pan_context.h"
-#include "pan_swizzle.h"
#include "pan_format.h"
#include "util/macros.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_format.h"
+#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "util/u_math.h"
#include "pan_blending.h"
#include "pan_blend_shaders.h"
#include "pan_util.h"
-
-static int performance_counter_number = 0;
-extern const char *pan_counters_base;
+#include "pan_tiler.h"
/* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */
//#define DRY_RUN
-static void
-panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource *rsrc)
+static enum mali_job_type
+panfrost_job_type_for_pipe(enum pipe_shader_type type)
{
- struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
- int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
- int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
+ switch (type) {
+ case PIPE_SHADER_VERTEX:
+ return JOB_TYPE_VERTEX;
+
+ case PIPE_SHADER_FRAGMENT:
+ /* Note: JOB_TYPE_FRAGMENT is different;
+ * a JOB_TYPE_FRAGMENT job actually executes the
+ * fragment shader, but JOB_TYPE_TILER is how the
+ * shader itself is specified */
+ return JOB_TYPE_TILER;
- /* 8 byte checksum per tile */
- rsrc->bo->checksum_stride = tile_w * 8;
- int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
- screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);
+ case PIPE_SHADER_GEOMETRY:
+ return JOB_TYPE_GEOMETRY;
- rsrc->bo->has_checksum = true;
+ case PIPE_SHADER_COMPUTE:
+ return JOB_TYPE_COMPUTE;
+
+ default:
+ unreachable("Unsupported shader stage");
+ }
}
/* Framebuffer descriptor */
* The formula itself was discovered mostly by manual brute force and
* aggressive algebraic simplification. */
- fb->resolution_check = ((w + h) / 3) << 4;
+ fb->tiler_resolution_check = ((w + h) / 3) << 4;
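+
+ /* e.g. at 1920x1080: ((1920 + 1080) / 3) << 4 = 1000 << 4 = 16000 */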
}
struct mali_single_framebuffer
-panfrost_emit_sfbd(struct panfrost_context *ctx)
+panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
struct mali_single_framebuffer framebuffer = {
.unknown2 = 0x1f,
.format = 0x30000000,
.clear_flags = 0x1000,
- .unknown_address_0 = ctx->scratchpad.gpu,
- .unknown_address_1 = ctx->misc_0.gpu,
- .unknown_address_2 = ctx->misc_0.gpu + 40960,
- .tiler_flags = 0xf0,
- .tiler_heap_free = ctx->tiler_heap.gpu,
- .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
+ .unknown_address_0 = ctx->scratchpad.bo->gpu,
+ .tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu,
+ .tiler_polygon_list_body = ctx->tiler_polygon_list.bo->gpu + 40960,
+ .tiler_hierarchy_mask = 0xF0,
+ .tiler_flags = 0x0,
+ .tiler_heap_free = ctx->tiler_heap.bo->gpu,
+ .tiler_heap_end = ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size,
};
panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
}
struct bifrost_framebuffer
-panfrost_emit_mfbd(struct panfrost_context *ctx)
+panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
{
+ unsigned width = ctx->pipe_framebuffer.width;
+ unsigned height = ctx->pipe_framebuffer.height;
+
struct bifrost_framebuffer framebuffer = {
- /* It is not yet clear what tiler_meta means or how it's
- * calculated, but we can tell the lower 32-bits are a
- * (monotonically increasing?) function of tile count and
- * geometry complexity; I suspect it defines a memory size of
- * some kind? for the tiler. It's really unclear at the
- * moment... but to add to the confusion, the hardware is happy
- * enough to accept a zero in this field, so we don't even have
- * to worry about it right now.
- *
- * The byte (just after the 32-bit mark) is much more
- * interesting. The higher nibble I've only ever seen as 0xF,
- * but the lower one I've seen as 0x0 or 0xF, and it's not
- * obvious what the difference is. But what -is- obvious is
- * that when the lower nibble is zero, performance is severely
- * degraded compared to when the lower nibble is set.
- * Evidently, that nibble enables some sort of fast path,
- * perhaps relating to caching or tile flush? Regardless, at
- * this point there's no clear reason not to set it, aside from
- * substantially increased memory requirements (of the misc_0
- * buffer) */
-
- .tiler_meta = ((uint64_t) 0xff << 32) | 0x0,
-
- .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
- .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
- .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
- .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
+ .width1 = MALI_POSITIVE(width),
+ .height1 = MALI_POSITIVE(height),
+ .width2 = MALI_POSITIVE(width),
+ .height2 = MALI_POSITIVE(height),
.unk1 = 0x1080,
.unknown2 = 0x1f,
- /* Corresponds to unknown_address_X of SFBD */
- .scratchpad = ctx->scratchpad.gpu,
- .tiler_scratch_start = ctx->misc_0.gpu,
-
- /* The constant added here is, like the lower word of
- * tiler_meta, (loosely) another product of framebuffer size
- * and geometry complexity. It must be sufficiently large for
- * the tiler_meta fast path to work; if it's too small, there
- * will be DATA_INVALID_FAULTs. Conversely, it must be less
- * than the total size of misc_0, or else there's no room. It's
- * possible this constant configures a partition between two
- * parts of misc_0? We haven't investigated the functionality,
- * as these buffers are internally used by the hardware
- * (presumably by the tiler) but not seemingly touched by the driver
- */
+ .scratchpad = ctx->scratchpad.bo->gpu,
+ };
+
+ framebuffer.tiler_hierarchy_mask =
+ panfrost_choose_hierarchy_mask(width, height, vertex_count);
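+
+ /* Each bit of the hierarchy mask enables one level of tile bins; the
+ * lowest bit appears to select the smallest (16x16) bins, with each
+ * successive bit doubling the bin dimensions */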
+
+ /* Compute the polygon header size and use that to offset the body */
+
+ unsigned header_size = panfrost_tiler_header_size(
+ width, height, framebuffer.tiler_hierarchy_mask);
+
+ unsigned body_size = panfrost_tiler_body_size(
+ width, height, framebuffer.tiler_hierarchy_mask);
+
+ /* Sanity check */
+
+ unsigned total_size = header_size + body_size;
+
+ if (framebuffer.tiler_hierarchy_mask) {
+ assert(ctx->tiler_polygon_list.bo->size >= total_size);
+
+ /* Specify allocated tiler structures */
+ framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu;
+
+ /* Allow the entire tiler heap */
+ framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
+ framebuffer.tiler_heap_end =
+ ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size;
+ } else {
+ /* The tiler is disabled, so don't allow the tiler heap */
+ framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
+ framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;
+
+ /* Use a dummy polygon list */
+ framebuffer.tiler_polygon_list = ctx->tiler_dummy.bo->gpu;
+
+ /* Also, set a "tiler disabled?" flag? */
+ framebuffer.tiler_hierarchy_mask |= 0x1000;
+ }
+
+ framebuffer.tiler_polygon_list_body =
+ framebuffer.tiler_polygon_list + header_size;
+
+ framebuffer.tiler_polygon_list_size =
+ header_size + body_size;
- .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000,
- .tiler_heap_start = ctx->tiler_heap.gpu,
- .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
- };
return framebuffer;
}
ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
-static uint32_t
-pan_pack_color(const union pipe_color_union *color, enum pipe_format format)
-{
- /* Alpha magicked to 1.0 if there is no alpha */
-
- bool has_alpha = util_format_has_alpha(format);
- float clear_alpha = has_alpha ? color->f[3] : 1.0f;
-
- /* Packed color depends on the framebuffer format */
-
- const struct util_format_description *desc =
- util_format_description(format);
-
- if (util_format_is_rgba8_variant(desc)) {
- return (float_to_ubyte(clear_alpha) << 24) |
- (float_to_ubyte(color->f[2]) << 16) |
- (float_to_ubyte(color->f[1]) << 8) |
- (float_to_ubyte(color->f[0]) << 0);
- } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
- /* First, we convert the components to R5, G6, B5 separately */
- unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
- unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
- unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;
-
- /* Then we pack into a sparse u32. TODO: Why these shifts? */
- return (b5 << 25) | (g6 << 14) | (r5 << 5);
- } else {
- /* Unknown format */
- assert(0);
- }
-
- return 0;
-}
-
static void
panfrost_clear(
struct pipe_context *pipe,
struct panfrost_context *ctx = pan_context(pipe);
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
- if (buffers & PIPE_CLEAR_COLOR) {
- enum pipe_format format = ctx->pipe_framebuffer.cbufs[0]->format;
- job->clear_color = pan_pack_color(color, format);
- }
-
- if (buffers & PIPE_CLEAR_DEPTH) {
- job->clear_depth = depth;
- }
-
- if (buffers & PIPE_CLEAR_STENCIL) {
- job->clear_stencil = stencil;
- }
-
- job->clear |= buffers;
+ panfrost_job_clear(ctx, job, buffers, color, depth, stencil);
}
static mali_ptr
ctx->cmdstream_i = 0;
if (ctx->require_sfbd)
- ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+ ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
else
- ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+ ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
/* Reset varyings allocated */
ctx->varying_height = 0;
/* XXX */
ctx->dirty |= PAN_DIRTY_SAMPLERS | PAN_DIRTY_TEXTURES;
-
- /* Reset job counters */
- ctx->draw_count = 0;
- ctx->vertex_job_count = 0;
- ctx->tiler_job_count = 0;
}
/* In practice, every field of these payloads should be configurable
panfrost_emit_vertex_payload(struct panfrost_context *ctx)
{
struct midgard_payload_vertex_tiler payload = {
- .prefix = {
- .workgroups_z_shift = 32,
- .workgroups_x_shift_2 = 0x2,
- .workgroups_x_shift_3 = 0x5,
- },
.gl_enables = 0x4 | (ctx->is_t6xx ? 0 : 0x2),
};
{
struct midgard_payload_vertex_tiler payload = {
.prefix = {
- .workgroups_z_shift = 32,
- .workgroups_x_shift_2 = 0x2,
- .workgroups_x_shift_3 = 0x6,
-
.zero1 = 0xffff, /* Why is this only seen on test-quad-textured? */
},
};
memcpy(&ctx->fragment_shader_core, &shader, sizeof(shader));
}
-static void
-panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
-{
- if (first->job_descriptor_size)
- first->next_job_64 = (u64) (uintptr_t) next;
- else
- first->next_job_32 = (u32) (uintptr_t) next;
-}
-
/* Generates a vertex/tiler job. This is, in some sense, the heart of the
* graphics command stream. It should be called once per draw, according to
* presentations. Set is_tiler for "tiler" jobs (fragment shader jobs, but in
struct panfrost_transfer
panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
{
- /* Each draw call corresponds to two jobs, and the set-value job is first */
- int draw_job_index = 1 + (2 * ctx->draw_count) + 1;
-
struct mali_job_descriptor_header job = {
.job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
- .job_index = draw_job_index + (is_tiler ? 1 : 0),
#ifdef __LP64__
.job_descriptor_size = 1,
#endif
#endif
struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
- if (is_tiler) {
- /* Tiler jobs depend on vertex jobs */
-
- job.job_dependency_index_1 = draw_job_index;
-
- /* Tiler jobs also depend on the previous tiler job */
-
- if (ctx->draw_count) {
- job.job_dependency_index_2 = draw_job_index - 1;
- /* Previous tiler job points to this tiler job */
- panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->draw_count - 1], transfer.gpu);
- } else {
- /* The only vertex job so far points to first tiler job */
- panfrost_link_job_pair(ctx->u_vertex_jobs[0], transfer.gpu);
- }
- } else {
- if (ctx->draw_count) {
- /* Previous vertex job points to this vertex job */
- panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->draw_count - 1], transfer.gpu);
-
- /* Last vertex job points to first tiler job */
- panfrost_link_job_pair(&job, ctx->tiler_jobs[0]);
- } else {
- /* Have the first vertex job depend on the set value job */
- job.job_dependency_index_1 = ctx->u_set_value_job->job_index;
- panfrost_link_job_pair(ctx->u_set_value_job, transfer.gpu);
- }
- }
-
memcpy(transfer.cpu, &job, sizeof(job));
memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
return transfer;
}
-/* Generates a set value job. It's unclear what exactly this does, why it's
- * necessary, and when to call it. */
-
-static void
-panfrost_set_value_job(struct panfrost_context *ctx)
-{
- struct mali_job_descriptor_header job = {
- .job_type = JOB_TYPE_SET_VALUE,
- .job_descriptor_size = 1,
- .job_index = 1,
- };
-
- struct mali_payload_set_value payload = {
- .out = ctx->misc_0.gpu,
- .unknown = 0x3,
- };
-
- struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
- memcpy(transfer.cpu, &job, sizeof(job));
- memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));
-
- ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
- ctx->set_value_job = transfer.gpu;
-}
-
static mali_ptr
panfrost_emit_varyings(
struct panfrost_context *ctx,
unsigned stride,
unsigned count)
{
- mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;
+ mali_ptr varying_address = ctx->varying_mem.bo->gpu + ctx->varying_height;
/* Fill out the descriptor */
slot->elements = varying_address | MALI_ATTR_LINEAR;
slot->stride = stride;
slot->size = stride * count;
- ctx->varying_height += ALIGN(slot->size, 64);
- assert(ctx->varying_height < ctx->varying_mem.size);
+ ctx->varying_height += ALIGN_POT(slot->size, 64);
+ assert(ctx->varying_height < ctx->varying_mem.bo->size);
return varying_address;
}
static void
panfrost_emit_varying_descriptor(
struct panfrost_context *ctx,
- unsigned invocation_count)
+ unsigned vertex_count)
{
/* Load the shaders */
struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
+ unsigned int num_gen_varyings = 0;
/* Allocate the varying descriptor */
struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
vs_size + fs_size);
+ /*
+ * Assign ->src_offset now that we know about all the general purpose
+ * varyings that will be used by the fragment and vertex shaders.
+ */
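+ /* Each general purpose varying is assigned a 16-byte (fp32 vec4)
+ * slot, so offsets advance in steps of 16 */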
+ for (unsigned i = 0; i < vs->tripipe->varying_count; i++) {
+ /*
+ * General purpose varyings have ->index set to 0, skip other
+ * entries.
+ */
+ if (vs->varyings[i].index)
+ continue;
+
+ vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
+ }
+
+ for (unsigned i = 0; i < fs->tripipe->varying_count; i++) {
+ unsigned j;
+
+ /* If we have a point sprite replacement, handle that here. We
+ * have to translate the location first. TODO: Flip y in the
+ * shader instead; we already key variants on the sprite state,
+ * this is just a time crunch */
+
+ unsigned loc = fs->varyings_loc[i];
+ unsigned pnt_loc =
+ (loc >= VARYING_SLOT_VAR0) ? (loc - VARYING_SLOT_VAR0) :
+ (loc == VARYING_SLOT_PNTC) ? 8 :
+ ~0;
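+
+ /* i.e. map VARYING_SLOT_VARn to sprite coordinate n, gl_PointCoord
+ * to slot 8 by convention, and everything else to ~0 (no sprite) */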
+
+ if (~pnt_loc && fs->point_sprite_mask & (1 << pnt_loc)) {
+ /* gl_PointCoord index by convention */
+ fs->varyings[i].index = 3;
+ fs->reads_point_coord = true;
+
+ /* Swizzle out the z/w to 0/1 */
+ fs->varyings[i].format = MALI_RG16F;
+ fs->varyings[i].swizzle =
+ panfrost_get_default_swizzle(2);
+
+ continue;
+ }
+
+ if (fs->varyings[i].index)
+ continue;
+
+ /*
+ * Re-use the VS general purpose varying pos if it exists,
+ * create a new one otherwise.
+ */
+ for (j = 0; j < vs->tripipe->varying_count; j++) {
+ if (fs->varyings_loc[i] == vs->varyings_loc[j])
+ break;
+ }
+
+ if (j < vs->tripipe->varying_count)
+ fs->varyings[i].src_offset = vs->varyings[j].src_offset;
+ else
+ fs->varyings[i].src_offset = 16 * (num_gen_varyings++);
+ }
+
memcpy(trans.cpu, vs->varyings, vs_size);
memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
union mali_attr varyings[PIPE_MAX_ATTRIBS];
unsigned idx = 0;
- /* General varyings -- use the VS's, since those are more likely to be
- * accurate on desktop */
-
- panfrost_emit_varyings(ctx, &varyings[idx++],
- vs->general_varying_stride, invocation_count);
+ panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
+ vertex_count);
/* fp32 vec4 gl_Position */
ctx->payload_tiler.postfix.position_varying =
panfrost_emit_varyings(ctx, &varyings[idx++],
- sizeof(float) * 4, invocation_count);
+ sizeof(float) * 4, vertex_count);
if (vs->writes_point_size || fs->reads_point_coord) {
/* fp16 vec1 gl_PointSize */
ctx->payload_tiler.primitive_size.pointer =
panfrost_emit_varyings(ctx, &varyings[idx++],
- 2, invocation_count);
+ 2, vertex_count);
}
if (fs->reads_point_coord) {
ctx->payload_tiler.postfix.varyings = varyings_p;
}
-static mali_ptr
+mali_ptr
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
{
struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
return rsrc->bo->gpu + buf->buffer_offset;
}
-/* Emits attributes and varying descriptors, which should be called every draw,
- * excepting some obscure circumstances */
-
-static void
-panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
-{
- /* Staged mali_attr, and index into them. i =/= k, depending on the
- * vertex buffer mask */
- union mali_attr attrs[PIPE_MAX_ATTRIBS];
- unsigned k = 0;
-
- unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
-
- for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
- if (!(ctx->vb_mask & (1 << i))) continue;
-
- struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
- struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
-
- if (!rsrc) continue;
-
- /* Align to 64 bytes by masking off the lower bits. This
- * will be adjusted back when we fixup the src_offset in
- * mali_attr_meta */
-
- mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
-
- /* Offset vertex count by draw_start to make sure we upload enough */
- attrs[k].stride = buf->stride;
- attrs[k].size = rsrc->base.width0;
-
- panfrost_job_add_bo(job, rsrc->bo);
- attrs[k].elements = addr | MALI_ATTR_LINEAR;
-
- ++k;
- }
-
- ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
-
- panfrost_emit_varying_descriptor(ctx, invocation_count);
-}
-
static bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
* QED.
*/
+ unsigned start = ctx->payload_vertex.draw_start;
+
for (unsigned i = 0; i < so->num_elements; ++i) {
unsigned vbi = so->pipe[i].vertex_buffer_index;
+ struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
/* Adjust by the masked off bits of the offset */
target[i].src_offset += (addr & 63);
+
+ /* Also, somewhat obscurely, per-instance data needs to be
+ * offset in response to a delayed start in an indexed draw */
+
+ if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
+ target[i].src_offset -= buf->stride * start;
+ }
+
}
ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
size_t desc_size = sizeof(struct mali_sampler_descriptor);
for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
- if (!ctx->sampler_count[t]) continue;
+ mali_ptr upload = 0;
- size_t transfer_size = desc_size * ctx->sampler_count[t];
+ if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
+ size_t transfer_size = desc_size * ctx->sampler_count[t];
- struct panfrost_transfer transfer =
- panfrost_allocate_transient(ctx, transfer_size);
+ struct panfrost_transfer transfer =
+ panfrost_allocate_transient(ctx, transfer_size);
- struct mali_sampler_descriptor *desc =
- (struct mali_sampler_descriptor *) transfer.cpu;
+ struct mali_sampler_descriptor *desc =
+ (struct mali_sampler_descriptor *) transfer.cpu;
- for (int i = 0; i < ctx->sampler_count[t]; ++i)
- desc[i] = ctx->samplers[t][i]->hw;
+ for (int i = 0; i < ctx->sampler_count[t]; ++i)
+ desc[i] = ctx->samplers[t][i]->hw;
+
+ upload = transfer.gpu;
+ }
if (t == PIPE_SHADER_FRAGMENT)
- ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu;
+ ctx->payload_tiler.postfix.sampler_descriptor = upload;
else if (t == PIPE_SHADER_VERTEX)
- ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu;
+ ctx->payload_vertex.postfix.sampler_descriptor = upload;
else
assert(0);
}
}
-/* Computes the address to a texture at a particular slice */
-
-static mali_ptr
-panfrost_get_texture_address(
- struct panfrost_resource *rsrc,
- unsigned level, unsigned face)
-{
- unsigned level_offset = rsrc->bo->slices[level].offset;
- unsigned face_offset = face * rsrc->bo->cubemap_stride;
-
- return rsrc->bo->gpu + level_offset + face_offset;
-
-}
-
static mali_ptr
panfrost_upload_tex(
struct panfrost_context *ctx,
if (!view)
return (mali_ptr) NULL;
- struct pipe_resource *tex_rsrc = view->base.texture;
- struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc;
+ struct pipe_sampler_view *pview = &view->base;
+ struct panfrost_resource *rsrc = pan_resource(pview->texture);
/* Do we interleave an explicit stride with every element? */
bool has_manual_stride =
view->hw.format.usage2 & MALI_TEX_MANUAL_STRIDE;
+ /* For easy access */
+
+ assert(pview->target != PIPE_BUFFER);
+ unsigned first_level = pview->u.tex.first_level;
+ unsigned last_level = pview->u.tex.last_level;
+ unsigned first_layer = pview->u.tex.first_layer;
+ unsigned last_layer = pview->u.tex.last_layer;
+
+ /* The low bit of the address is set when sampling from colour AFBC */
+ bool is_afbc = rsrc->layout == PAN_AFBC;
+ bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
+ unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
+
+ /* Add the BO to the job so it's retained until the job is done. */
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ panfrost_job_add_bo(job, rsrc->bo);
+
/* Inject the addresses in, interleaving mip levels, cube faces, and
* strides in that order */
unsigned idx = 0;
- for (unsigned l = 0; l <= tex_rsrc->last_level; ++l) {
- for (unsigned f = 0; f < tex_rsrc->array_size; ++f) {
+ for (unsigned l = first_level; l <= last_level; ++l) {
+ for (unsigned f = first_layer; f <= last_layer; ++f) {
+
view->hw.payload[idx++] =
- panfrost_get_texture_address(rsrc, l, f);
+ panfrost_get_texture_address(rsrc, l, f) + afbc_bit;
if (has_manual_stride) {
view->hw.payload[idx++] =
- rsrc->bo->slices[l].stride;
+ rsrc->slices[l].stride;
}
}
}
panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
{
for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
- /* Shortcircuit */
- if (!ctx->sampler_view_count[t]) continue;
+ mali_ptr trampoline = 0;
- uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ if (ctx->sampler_view_count[t]) {
+ uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
- for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
- trampolines[i] =
- panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);
+ for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
+ trampolines[i] =
+ panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);
- mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
+ trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
+ }
if (t == PIPE_SHADER_FRAGMENT)
ctx->payload_tiler.postfix.texture_trampoline = trampoline;
}
}
+struct sysval_uniform {
+ union {
+ float f[4];
+ int32_t i[4];
+ uint32_t u[4];
+ };
+};
+
+static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
+ struct sysval_uniform *uniform)
+{
+ const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
+
+ uniform->f[0] = vp->scale[0];
+ uniform->f[1] = vp->scale[1];
+ uniform->f[2] = vp->scale[2];
+}
+
+static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
+ struct sysval_uniform *uniform)
+{
+ const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
+
+ uniform->f[0] = vp->translate[0];
+ uniform->f[1] = vp->translate[1];
+ uniform->f[2] = vp->translate[2];
+}
+
+static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
+ enum pipe_shader_type st,
+ unsigned int sysvalid,
+ struct sysval_uniform *uniform)
+{
+ unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
+ unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
+ bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
+ struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
+
+ assert(dim);
+ uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
+
+ if (dim > 1)
+ uniform->i[1] = u_minify(tex->texture->height0,
+ tex->u.tex.first_level);
+
+ if (dim > 2)
+ uniform->i[2] = u_minify(tex->texture->depth0,
+ tex->u.tex.first_level);
+
+ if (is_array)
+ uniform->i[dim] = tex->texture->array_size;
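+
+ /* e.g. a 2D array texture fills i[0] = width, i[1] = height and
+ * i[2] = array_size (the layer count) */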
+}
+
+static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
+ struct panfrost_shader_state *ss,
+ enum pipe_shader_type st)
+{
+ struct sysval_uniform *uniforms = (void *)buf;
+
+ for (unsigned i = 0; i < ss->sysval_count; ++i) {
+ int sysval = ss->sysval[i];
+
+ switch (PAN_SYSVAL_TYPE(sysval)) {
+ case PAN_SYSVAL_VIEWPORT_SCALE:
+ panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
+ break;
+ case PAN_SYSVAL_VIEWPORT_OFFSET:
+ panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
+ break;
+ case PAN_SYSVAL_TEXTURE_SIZE:
+ panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
+ &uniforms[i]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
+
+static const void *
+panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
+{
+ struct pipe_constant_buffer *cb = &buf->cb[index];
+ struct panfrost_resource *rsrc = pan_resource(cb->buffer);
+
+ if (rsrc)
+ return rsrc->bo->cpu;
+ else if (cb->user_buffer)
+ return cb->user_buffer;
+ else
+ unreachable("No constant buffer");
+}
+
+static mali_ptr
+panfrost_map_constant_buffer_gpu(
+ struct panfrost_context *ctx,
+ struct panfrost_constant_buffer *buf,
+ unsigned index)
+{
+ struct pipe_constant_buffer *cb = &buf->cb[index];
+ struct panfrost_resource *rsrc = pan_resource(cb->buffer);
+
+ if (rsrc)
+ return rsrc->bo->gpu;
+ else if (cb->user_buffer)
+ return panfrost_upload_transient(ctx, cb->user_buffer, cb->buffer_size);
+ else
+ unreachable("No constant buffer");
+}
+
+/* Compute number of UBOs active (more specifically, compute the highest UBO
+ * number addressable -- if there are gaps, include them in the count anyway).
+ * We always include UBO #0 in the count, since we *need* uniforms enabled for
+ * sysvals. */
+
+static unsigned
+panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
+{
+ unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
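+
+ /* e.g. enabled_mask = 0x5 keeps mask = 0x5, so the count is
+ * 32 - __builtin_clz(0x5) = 3, addressing UBOs #0..#2 */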
+ return 32 - __builtin_clz(mask);
+}
+
+/* Fixes up a shader state with current state, returning a GPU address to the
+ * patched shader */
+
+static mali_ptr
+panfrost_patch_shader_state(
+ struct panfrost_context *ctx,
+ struct panfrost_shader_state *ss,
+ enum pipe_shader_type stage)
+{
+ ss->tripipe->texture_count = ctx->sampler_view_count[stage];
+ ss->tripipe->sampler_count = ctx->sampler_count[stage];
+
+ ss->tripipe->midgard1.flags = 0x220;
+
+ unsigned ubo_count = panfrost_ubo_count(ctx, stage);
+ ss->tripipe->midgard1.uniform_buffer_count = ubo_count;
+
+ return ss->tripipe_gpu;
+}
+
/* Go through dirty flags and actualise them in the cmdstream. */
void
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
if (with_vertex_data) {
- panfrost_emit_vertex_data(ctx, job);
+ panfrost_emit_vertex_data(job);
+
+ /* Varyings emitted for -all- geometry */
+ unsigned total_count = ctx->padded_count * ctx->instance_count;
+ panfrost_emit_varying_descriptor(ctx, total_count);
}
bool msaa = ctx->rasterizer->base.multisample;
SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
}
- /* Enable job requirements at draw-time */
-
- if (msaa)
- job->requirements |= PAN_REQ_MSAA;
-
- if (ctx->depth_stencil->depth.writemask)
- job->requirements |= PAN_REQ_DEPTH_WRITE;
+ panfrost_job_set_requirements(ctx, job);
if (ctx->occlusion_query) {
ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
- /* Late shader descriptor assignments */
-
- vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
- vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];
-
- /* Who knows */
- vs->tripipe->midgard1.unknown1 = 0x2201;
-
- ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
+ ctx->payload_vertex.postfix._shader_upper =
+ panfrost_patch_shader_state(ctx, vs, PIPE_SHADER_VERTEX) >> 4;
}
if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
assert(ctx->fs);
struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];
+ panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT);
+
#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
COPY(shader);
COPY(attribute_count);
COPY(varying_count);
+ COPY(texture_count);
+ COPY(sampler_count);
COPY(midgard1.uniform_count);
+ COPY(midgard1.uniform_buffer_count);
COPY(midgard1.work_count);
+ COPY(midgard1.flags);
COPY(midgard1.unknown2);
#undef COPY
ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
/* Set late due to depending on render state */
- /* The one at the end seems to mean "1 UBO" */
- ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;
+ unsigned flags = ctx->fragment_shader_core.midgard1.flags;
+
+ /* Depending on whether it's legal to do so in the given shader,
+ * we try to enable early-z testing (or forward-pixel kill?) */
+
+ if (!variant->can_discard)
+ flags |= MALI_EARLY_Z;
- /* Assign texture/sample count right before upload */
- ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
- ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
+ /* Any time texturing is used, derivatives are implicitly
+ * calculated, so we need to enable helper invocations */
+
+ if (ctx->sampler_view_count[PIPE_SHADER_FRAGMENT])
+ flags |= MALI_HELPER_INVOCATIONS;
+
+ ctx->fragment_shader_core.midgard1.flags = flags;
/* Assign the stencil refs late */
ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
if (variant->can_discard) {
ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
- ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_NO_ALPHA_TO_COVERAGE;
- ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
- ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
+ ctx->fragment_shader_core.midgard1.flags |= 0x400;
}
/* Check if we're using the default blend descriptor (fast path) */
if (ctx->blend->has_blend_shader) {
ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
+ } else {
+ ctx->fragment_shader_core.blend.shader = 0;
}
if (ctx->require_sfbd) {
/* TODO: MRT */
for (unsigned i = 0; i < 1; ++i) {
+ bool is_srgb =
+ (ctx->pipe_framebuffer.nr_cbufs > i) &&
+ util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+
rts[i].flags = blend_count;
+ if (is_srgb)
+ rts[i].flags |= MALI_BLEND_SRGB;
+
+ /* TODO: sRGB in blend shaders is currently
+ * unimplemented. Contact me (Alyssa) if you're
+ * interested in working on this. We have
+ * native Midgard ops for helping here, but
+ * they're not well-understood yet. */
+
+ assert(!(is_srgb && ctx->blend->has_blend_shader));
+
if (ctx->blend->has_blend_shader) {
rts[i].blend.shader = ctx->blend->blend_shader;
} else {
struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs;
+ /* Uniforms are implicitly UBO #0 */
+ bool has_uniforms = buf->enabled_mask & (1 << 0);
+
/* Allocate room for the sysval and the uniforms */
size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
- size_t size = sys_size + buf->size;
+ size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
+ size_t size = sys_size + uniform_size;
struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
/* Upload sysvals requested by the shader */
- float *uniforms = (float *) transfer.cpu;
- for (unsigned i = 0; i < ss->sysval_count; ++i) {
- int sysval = ss->sysval[i];
-
- if (sysval == PAN_SYSVAL_VIEWPORT_SCALE) {
- uniforms[4*i + 0] = vp->scale[0];
- uniforms[4*i + 1] = vp->scale[1];
- uniforms[4*i + 2] = vp->scale[2];
- } else if (sysval == PAN_SYSVAL_VIEWPORT_OFFSET) {
- uniforms[4*i + 0] = vp->translate[0];
- uniforms[4*i + 1] = vp->translate[1];
- uniforms[4*i + 2] = vp->translate[2];
- } else {
- assert(0);
- }
- }
+ panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);
/* Upload uniforms */
- memcpy(transfer.cpu + sys_size, buf->buffer, buf->size);
+ if (has_uniforms) {
+ const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
+ memcpy(transfer.cpu + sys_size, cpu, uniform_size);
+ }
int uniform_count = 0;
unreachable("Invalid shader stage\n");
}
- /* Also attach the same buffer as a UBO for extended access */
+ /* Next up, attach UBOs. UBO #0 is the uniforms we just
+ * uploaded */
+
+ unsigned ubo_count = panfrost_ubo_count(ctx, i);
+ assert(ubo_count >= 1);
+
+ size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
+ struct mali_uniform_buffer_meta *ubos = calloc(sz, 1);
+
+ /* Upload uniforms as a UBO */
+ ubos[0].size = MALI_POSITIVE((2 + uniform_count));
+ ubos[0].ptr = transfer.gpu >> 2;
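+
+ /* Note UBO pointers are stored shifted right by two bits, implying
+ * 4-byte alignment of the backing buffers */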
+
+ /* The rest are honest-to-goodness UBOs */
+
+ for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
+ size_t sz = buf->cb[ubo].buffer_size;
+
+ bool enabled = buf->enabled_mask & (1 << ubo);
+ bool empty = sz == 0;
+
+ if (!enabled || empty) {
+ /* Stub out disabled UBOs to catch accesses */
+
+ ubos[ubo].size = 0;
+ ubos[ubo].ptr = 0xDEAD0000;
+ continue;
+ }
+
+ mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);
+
+ unsigned bytes_per_field = 16;
+ unsigned aligned = ALIGN_POT(sz, bytes_per_field);
+ unsigned fields = aligned / bytes_per_field;
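+
+ /* e.g. a 100-byte UBO aligns to 112 bytes, i.e. 7 16-byte fields */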
- struct mali_uniform_buffer_meta uniform_buffers[] = {
- {
- .size = MALI_POSITIVE((2 + uniform_count)),
- .ptr = transfer.gpu >> 2,
- },
- };
+ ubos[ubo].size = MALI_POSITIVE(fields);
+ ubos[ubo].ptr = gpu >> 2;
+ }
- mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
+ mali_ptr ubufs = panfrost_upload_transient(ctx, ubos, sz);
+ free(ubos);
postfix->uniforms = transfer.gpu;
postfix->uniform_buffers = ubufs;
- buf->dirty = 0;
+ buf->dirty_mask = 0;
}
/* TODO: Upload the viewport somewhere more appropriate */
* should work, but in practice causes issues when we're not
* explicitly trying to scissor */
- .clip_minx = -inff,
- .clip_miny = -inff,
- .clip_maxx = inff,
- .clip_maxy = inff,
+ .clip_minx = -INFINITY,
+ .clip_miny = -INFINITY,
+ .clip_maxx = INFINITY,
+ .clip_maxy = INFINITY,
.clip_minz = 0.0,
.clip_maxz = 1.0,
};
/* Always scissor to the viewport by default. */
- view.viewport0[0] = (int) (vp->translate[0] - vp->scale[0]);
- view.viewport1[0] = MALI_POSITIVE((int) (vp->translate[0] + vp->scale[0]));
+ int minx = (int) (vp->translate[0] - vp->scale[0]);
+ int maxx = (int) (vp->translate[0] + vp->scale[0]);
int miny = (int) (vp->translate[1] - vp->scale[1]);
int maxy = (int) (vp->translate[1] + vp->scale[1]);
- if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
- view.viewport0[0] = ss->minx;
- view.viewport1[0] = MALI_POSITIVE(ss->maxx);
+ /* Apply the scissor test */
+ if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
+ minx = ss->minx;
+ maxx = ss->maxx;
miny = ss->miny;
maxy = ss->maxy;
}
/* Hardware needs the min/max to be strictly ordered, so flip if we
- * need to */
+ * need to. The viewport transformation in the vertex shader will
+ * handle the negatives if we don't */
+
if (miny > maxy) {
int temp = miny;
miny = maxy;
maxy = temp;
}
+ if (minx > maxx) {
+ int temp = minx;
+ minx = maxx;
+ maxx = temp;
+ }
+
+ /* Clamp everything positive, just in case */
+
+ maxx = MAX2(0, maxx);
+ maxy = MAX2(0, maxy);
+ minx = MAX2(0, minx);
+ miny = MAX2(0, miny);
+
+ /* Clamp to the framebuffer size as a last check */
+
+ minx = MIN2(ctx->pipe_framebuffer.width, minx);
+ maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
+
+ miny = MIN2(ctx->pipe_framebuffer.height, miny);
+ maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
+
+ /* Update the job, unless we're doing wallpapering (whose lack of
+ * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
+ * just... be faster :) */
+
+ if (!ctx->wallpaper_batch)
+ panfrost_job_union_scissor(job, minx, miny, maxx, maxy);
+
+ /* Upload */
+
+ view.viewport0[0] = minx;
+ view.viewport1[0] = MALI_POSITIVE(maxx);
+
view.viewport0[1] = miny;
view.viewport1[1] = MALI_POSITIVE(maxy);
static void
panfrost_queue_draw(struct panfrost_context *ctx)
{
- /* TODO: Expand the array? */
- if (ctx->draw_count >= MAX_DRAW_CALLS) {
- DBG("Job buffer overflow, ignoring draw\n");
- assert(0);
- }
-
/* Handle dirty flags now */
panfrost_emit_for_draw(ctx, true);
- /* We need a set_value job before any other draw jobs */
- if (ctx->draw_count == 0)
- panfrost_set_value_job(ctx);
+ /* If rasterizer discard is enabled, only submit the vertex job */
+
+ bool rasterizer_discard = ctx->rasterizer
+ && ctx->rasterizer->base.rasterizer_discard;
struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
- ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
- ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;
+ struct panfrost_transfer tiler;
+
+ if (!rasterizer_discard)
+ tiler = panfrost_vertex_tiler_job(ctx, true);
- struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true);
- ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
- ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;
+ struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
- ctx->draw_count++;
+ if (rasterizer_discard)
+ panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
+ else if (ctx->wallpaper_batch)
+ panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
+ else
+ panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
}
/* The entire frame is in memory -- send it off to the kernel! */
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_screen *screen = pan_screen(gallium->screen);
- /* Edge case if screen is cleared and nothing else */
- bool has_draws = ctx->draw_count > 0;
-
- /* Workaround a bizarre lockup (a hardware errata?) */
- if (!has_draws)
- flush_immediate = true;
-
#ifndef DRY_RUN
- bool is_scanout = panfrost_is_scanout(ctx);
- screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);
+ panfrost_job_submit(ctx, job);
/* If visual, we can stall a frame */
if (!flush_immediate)
- screen->driver->force_flush_fragment(ctx, fence);
+ panfrost_drm_force_flush_fragment(ctx, fence);
screen->last_fragment_flushed = false;
screen->last_job = job;
/* If readback, flush now (hurts the pipelined performance) */
if (flush_immediate)
- screen->driver->force_flush_fragment(ctx, fence);
-
- if (screen->driver->dump_counters && pan_counters_base) {
- screen->driver->dump_counters(screen);
-
- char filename[128];
- snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
- FILE *fp = fopen(filename, "wb");
- fwrite(screen->perf_counters.cpu, 4096, sizeof(uint32_t), fp);
- fclose(fp);
- }
-
+ panfrost_drm_force_flush_fragment(ctx, fence);
#endif
}
if (ctx->pipe_framebuffer.cbufs[0] == NULL)
return;
- /* Blit the wallpaper in */
- panfrost_blit_wallpaper(ctx);
-
- /* We are flushing all queued draws and we know that no more jobs will
- * be added until the next frame.
- * We also know that the last jobs are the wallpaper jobs, and they
- * need to be linked so they execute right after the set_value job.
- */
-
- /* set_value job to wallpaper vertex job */
- panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[ctx->vertex_job_count - 1]);
- ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
+ /* Check if the buffer has any content worth preserving */
- /* wallpaper vertex job to first vertex job */
- panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 1], ctx->vertex_jobs[0]);
- ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
+ struct pipe_surface *surf = ctx->pipe_framebuffer.cbufs[0];
+ struct panfrost_resource *rsrc = pan_resource(surf->texture);
+ unsigned level = surf->u.tex.level;
- /* last vertex job to wallpaper tiler job */
- panfrost_link_job_pair(ctx->u_vertex_jobs[ctx->vertex_job_count - 2], ctx->tiler_jobs[ctx->tiler_job_count - 1]);
- ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_1 = ctx->u_vertex_jobs[ctx->vertex_job_count - 1]->job_index;
- ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_dependency_index_2 = 0;
+ if (!rsrc->slices[level].initialized)
+ return;
- /* wallpaper tiler job to first tiler job */
- panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 1], ctx->tiler_jobs[0]);
- ctx->u_tiler_jobs[0]->job_dependency_index_1 = ctx->u_vertex_jobs[0]->job_index;
- ctx->u_tiler_jobs[0]->job_dependency_index_2 = ctx->u_tiler_jobs[ctx->tiler_job_count - 1]->job_index;
+ /* Save the batch */
+ struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
- /* last tiler job to NULL */
- panfrost_link_job_pair(ctx->u_tiler_jobs[ctx->tiler_job_count - 2], 0);
+ ctx->wallpaper_batch = batch;
+ panfrost_blit_wallpaper(ctx);
+ ctx->wallpaper_batch = NULL;
}
void
struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
/* Nothing to do! */
- if (!ctx->draw_count && !job->clear) return;
+ if (!job->last_job.gpu && !job->clear) return;
if (!job->clear)
panfrost_draw_wallpaper(&ctx->base);
- /* Whether to stall the pipeline for immediately correct results */
- bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;
+ /* Whether to stall the pipeline for immediately correct results. Since
+ * pipelined rendering is quite broken right now (to be fixed by the
+ * panfrost_job refactor), just take the perf hit for correctness */
+ bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;
/* Submit the frame itself */
panfrost_submit_frame(ctx, flush_immediate, fence, job);
struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
off_t offset = info->start * info->index_size;
+ struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
if (!info->has_user_indices) {
/* Only resources can be directly mapped */
+ panfrost_job_add_bo(batch, rsrc->bo);
return rsrc->bo->gpu + offset;
} else {
/* Otherwise, we need to upload to transient memory */
}
}
+static bool
+panfrost_scissor_culls_everything(struct panfrost_context *ctx)
+{
+ const struct pipe_scissor_state *ss = &ctx->scissor;
+
+ /* Check if we're scissoring at all */
+
+ if (!(ctx->rasterizer && ctx->rasterizer->base.scissor))
+ return false;
+
+ return (ss->minx == ss->maxx) || (ss->miny == ss->maxy);
+}
+
static void
panfrost_draw_vbo(
struct pipe_context *pipe,
{
struct panfrost_context *ctx = pan_context(pipe);
+ /* First of all, check the scissor to see if anything is drawn at all.
+ * If it's not, we drop the draw (mostly a conformance issue;
+ * well-behaved apps shouldn't hit this) */
+
+ if (panfrost_scissor_culls_everything(ctx))
+ return;
+
ctx->payload_vertex.draw_start = info->start;
ctx->payload_tiler.draw_start = info->start;
int mode = info->mode;
+ /* Fallback unsupported restart index */
+ unsigned primitive_index = (1 << (info->index_size * 8)) - 1;
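+ /* e.g. 0xFFFF for 16-bit indices; any other restart index takes
+ * the software fallback below */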
+
+ if (info->primitive_restart && info->index_size
+ && info->restart_index != primitive_index) {
+ util_draw_vbo_without_prim_restart(pipe, info);
+ return;
+ }
+
/* Fallback for unsupported modes */
if (!(ctx->draw_modes & (1 << mode))) {
ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);
ctx->vertex_count = info->count;
+ ctx->instance_count = info->instance_count;
/* For non-indexed draws, they're the same */
- unsigned invocation_count = ctx->vertex_count;
+ unsigned vertex_count = ctx->vertex_count;
unsigned draw_flags = 0;
if (panfrost_writes_point_size(ctx))
draw_flags |= MALI_DRAW_VARYING_SIZE;
+ if (info->primitive_restart)
+ draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
+
/* For higher amounts of vertices (greater than what fits in a 16-bit
* short), the other value is needed, otherwise there will be bizarre
- * rendering artefacts. It's not clear what these values mean yet. */
+ * rendering artefacts. It's not clear what these values mean yet. The
+ * same value is also needed for instancing and sometimes for points
+ * (perhaps related to dynamically setting gl_PointSize) */
+
+ bool is_points = mode == PIPE_PRIM_POINTS;
+ bool many_verts = ctx->vertex_count > 0xFFFF;
+ bool instanced = ctx->instance_count > 1;
+
+ draw_flags |= (is_points || many_verts || instanced) ? 0x3000 : 0x18000;
- draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
+ /* This doesn't make much sense */
+ if (mode == PIPE_PRIM_LINE_STRIP) {
+ draw_flags |= 0x800;
+ }
if (info->index_size) {
/* Calculate the min/max index used so we can figure out how
}
/* Use the corresponding values */
- invocation_count = max_index - min_index + 1;
+ vertex_count = max_index - min_index + 1;
ctx->payload_vertex.draw_start = min_index;
ctx->payload_tiler.draw_start = min_index;
ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
}
- ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
- ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);
+ /* Dispatch "compute jobs" for the vertex/tiler pair as (1,
+ * vertex_count, 1) */
+
+ panfrost_pack_work_groups_fused(
+ &ctx->payload_vertex.prefix,
+ &ctx->payload_tiler.prefix,
+ 1, vertex_count, info->instance_count,
+ 1, 1, 1);
+
ctx->payload_tiler.prefix.unknown_draw = draw_flags;
+ /* Encode the padded vertex count */
+
+ if (info->instance_count > 1) {
+ /* Triangles have non-even vertex counts so they change how
+ * padding works internally */
+
+ bool is_triangle =
+ mode == PIPE_PRIM_TRIANGLES ||
+ mode == PIPE_PRIM_TRIANGLE_STRIP ||
+ mode == PIPE_PRIM_TRIANGLE_FAN;
+
+ struct pan_shift_odd so =
+ panfrost_padded_vertex_count(vertex_count, !is_triangle);
+
+ ctx->payload_vertex.instance_shift = so.shift;
+ ctx->payload_tiler.instance_shift = so.shift;
+
+ ctx->payload_vertex.instance_odd = so.odd;
+ ctx->payload_tiler.instance_odd = so.odd;
+
+ ctx->padded_count = pan_expand_shift_odd(so);
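+
+ /* The padded count takes the form of an odd number times a
+ * power of two, which is what the shift/odd pair encodes */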
+ } else {
+ ctx->padded_count = ctx->vertex_count;
+
+ /* Reset instancing state */
+ ctx->payload_vertex.instance_shift = 0;
+ ctx->payload_vertex.instance_odd = 0;
+ ctx->payload_tiler.instance_shift = 0;
+ ctx->payload_tiler.instance_odd = 0;
+ }
+
/* Fire off the draw itself */
panfrost_queue_draw(ctx);
}
ctx->rasterizer = hwcso;
ctx->dirty |= PAN_DIRTY_RASTERIZER;
+
+ /* Point sprites are emulated */
+
+ struct panfrost_shader_state *variant =
+ ctx->fs ? &ctx->fs->variants[ctx->fs->active_variant] : NULL;
+
+ if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
+ ctx->base.bind_fs_state(&ctx->base, ctx->fs);
}
static void *
panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
for (int i = 0; i < num_elements; ++i) {
- so->hw[i].index = elements[i].vertex_buffer_index;
+ so->hw[i].index = i;
enum pipe_format fmt = elements[i].src_format;
const struct util_format_description *desc = util_format_description(fmt);
},
.min_lod = FIXED_16(cso->min_lod),
.max_lod = FIXED_16(cso->max_lod),
- .unknown2 = 1,
+ .seamless_cube_map = cso->seamless_cube_map,
};
+ /* If necessary, we disable mipmapping in the sampler descriptor by
+ * clamping the LOD as tight as possible (from 0 to epsilon,
+ * essentially -- remember these are fixed point numbers, so
+ * epsilon=1/256) */
+
+ if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
+ sampler_descriptor.max_lod = sampler_descriptor.min_lod;
+
+ /* Enforce that there is something in the middle by adding epsilon */
+
+ if (sampler_descriptor.min_lod == sampler_descriptor.max_lod)
+ sampler_descriptor.max_lod++;
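+
+ /* e.g. min_lod = max_lod = 0 becomes the range [0, 1/256] */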
+
+ /* Sanity check */
+ assert(sampler_descriptor.max_lod > sampler_descriptor.min_lod);
+
so->hw = sampler_descriptor;
return so;
}
static bool
-panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant)
+panfrost_variant_matches(
+ struct panfrost_context *ctx,
+ struct panfrost_shader_state *variant,
+ enum pipe_shader_type type)
{
+ struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
- if (alpha->enabled || variant->alpha_state.enabled) {
+ bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
+
+ if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
/* Make sure enable state is at least the same */
if (alpha->enabled != variant->alpha_state.enabled) {
return false;
return false;
}
}
+
+ if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
+ variant->point_sprite_mask)) {
+ /* Ensure the same varyings are turned to point sprites */
+ if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
+ return false;
+
+ /* Ensure the orientation is correct */
+ bool upper_left =
+ rasterizer->sprite_coord_mode ==
+ PIPE_SPRITE_COORD_UPPER_LEFT;
+
+ if (variant->point_sprite_upper_left != upper_left)
+ return false;
+ }
+
/* Otherwise, we're good to go */
return true;
}
static void
-panfrost_bind_fs_state(
+panfrost_bind_shader_state(
struct pipe_context *pctx,
- void *hwcso)
+ void *hwcso,
+ enum pipe_shader_type type)
{
struct panfrost_context *ctx = pan_context(pctx);
- ctx->fs = hwcso;
+ if (type == PIPE_SHADER_FRAGMENT) {
+ ctx->fs = hwcso;
+ ctx->dirty |= PAN_DIRTY_FS;
+ } else {
+ assert(type == PIPE_SHADER_VERTEX);
+ ctx->vs = hwcso;
+ ctx->dirty |= PAN_DIRTY_VS;
+ }
- if (hwcso) {
- /* Match the appropriate variant */
+ if (!hwcso) return;
- signed variant = -1;
+ /* Match the appropriate variant */
- struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
+ signed variant = -1;
+ struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
- for (unsigned i = 0; i < variants->variant_count; ++i) {
- if (panfrost_variant_matches(ctx, &variants->variants[i])) {
- variant = i;
- break;
- }
+ for (unsigned i = 0; i < variants->variant_count; ++i) {
+ if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
+ variant = i;
+ break;
}
+ }
- if (variant == -1) {
- /* No variant matched, so create a new one */
- variant = variants->variant_count++;
- assert(variants->variant_count < MAX_SHADER_VARIANTS);
+ if (variant == -1) {
+ /* No variant matched, so create a new one */
+ variant = variants->variant_count++;
+ assert(variants->variant_count < MAX_SHADER_VARIANTS);
- variants->variants[variant].base = hwcso;
- variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;
+ struct panfrost_shader_state *v =
+ &variants->variants[variant];
- /* Allocate the mapped descriptor ahead-of-time. TODO: Use for FS as well as VS */
- struct panfrost_context *ctx = pan_context(pctx);
- struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
+ v->base = hwcso;
- variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
- variants->variants[variant].tripipe_gpu = transfer.gpu;
+ if (type == PIPE_SHADER_FRAGMENT) {
+ v->alpha_state = ctx->depth_stencil->alpha;
+ if (ctx->rasterizer) {
+ v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
+ v->point_sprite_upper_left =
+ ctx->rasterizer->base.sprite_coord_mode ==
+ PIPE_SPRITE_COORD_UPPER_LEFT;
+ }
}
- /* Select this variant */
- variants->active_variant = variant;
-
- struct panfrost_shader_state *shader_state = &variants->variants[variant];
- assert(panfrost_variant_matches(ctx, shader_state));
+ /* Allocate the mapped descriptor ahead-of-time. */
+ struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
- /* Now we have a variant selected, so compile and go */
+ variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
+ variants->variants[variant].tripipe_gpu = transfer.gpu;
- if (!shader_state->compiled) {
- panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state);
- shader_state->compiled = true;
- }
}
- ctx->dirty |= PAN_DIRTY_FS;
-}
-
-static void
-panfrost_bind_vs_state(
- struct pipe_context *pctx,
- void *hwcso)
-{
- struct panfrost_context *ctx = pan_context(pctx);
+ /* Select this variant */
+ variants->active_variant = variant;
- ctx->vs = hwcso;
+ struct panfrost_shader_state *shader_state = &variants->variants[variant];
+ assert(panfrost_variant_matches(ctx, shader_state, type));
- if (hwcso) {
- if (!ctx->vs->variants[0].compiled) {
- ctx->vs->variants[0].base = hwcso;
+ /* We finally have a variant, so compile it */
- /* TODO DRY from above */
- struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
- ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu;
- ctx->vs->variants[0].tripipe_gpu = transfer.gpu;
+ if (!shader_state->compiled) {
+ panfrost_shader_compile(ctx, shader_state->tripipe, NULL,
+ panfrost_job_type_for_pipe(type), shader_state);
- panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]);
- ctx->vs->variants[0].compiled = true;
- }
+ shader_state->compiled = true;
}
+}
+
+static void
+panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
+{
+ panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
+}
- ctx->dirty |= PAN_DIRTY_VS;
+static void
+panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
+{
+ panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}
static void
struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
- size_t sz = buf ? buf->buffer_size : 0;
-
- /* Free previous buffer */
-
- pbuf->dirty = true;
- pbuf->size = sz;
-
- if (pbuf->buffer) {
- free(pbuf->buffer);
- pbuf->buffer = NULL;
- }
-
- /* If unbinding, we're done */
-
- if (!buf)
- return;
-
- /* Multiple constant buffers not yet supported */
- assert(index == 0);
+ util_copy_constant_buffer(&pbuf->cb[index], buf);
- const uint8_t *cpu;
+ unsigned mask = (1 << index);
- struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);
-
- if (rsrc) {
- cpu = rsrc->bo->cpu;
- } else if (buf->user_buffer) {
- cpu = buf->user_buffer;
- } else {
- DBG("No constant buffer?\n");
+ if (unlikely(!buf)) {
+ pbuf->enabled_mask &= ~mask;
+ pbuf->dirty_mask &= ~mask;
return;
}
- /* Copy the constant buffer into the driver context for later upload */
-
- pbuf->buffer = malloc(sz);
- memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
+ pbuf->enabled_mask |= mask;
+ pbuf->dirty_mask |= mask;
}
static void
ctx->dirty |= PAN_DIRTY_FS;
}
+static enum mali_texture_type
+panfrost_translate_texture_type(enum pipe_texture_target t)
+{
+ switch (t) {
+ case PIPE_BUFFER:
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_1D_ARRAY:
+ return MALI_TEX_1D;
+
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_RECT:
+ return MALI_TEX_2D;
+
+ case PIPE_TEXTURE_3D:
+ return MALI_TEX_3D;
+
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ return MALI_TEX_CUBE;
+
+ default:
+ unreachable("Unknown target");
+ }
+}
+
static struct pipe_sampler_view *
panfrost_create_sampler_view(
struct pipe_context *pctx,
struct pipe_resource *texture,
const struct pipe_sampler_view *template)
{
- struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
+ struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
int bytes_per_pixel = util_format_get_blocksize(texture->format);
pipe_reference(NULL, &texture->reference);
* (data) itself. So, we serialise the descriptor here and cache it for
* later. */
- /* Make sure it's something with which we're familiar */
- assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);
-
/* TODO: Detect from format better */
const struct util_format_description *desc = util_format_description(prsrc->base.format);
unsigned usage2_layout = 0x10;
- switch (prsrc->bo->layout) {
+ switch (prsrc->layout) {
case PAN_AFBC:
usage2_layout |= 0x8 | 0x4;
break;
unsigned first_level = template->u.tex.first_level;
unsigned last_level = template->u.tex.last_level;
- if (prsrc->bo->layout == PAN_LINEAR) {
+ if (prsrc->layout == PAN_LINEAR) {
for (unsigned l = first_level; l <= last_level; ++l) {
- unsigned actual_stride = prsrc->bo->slices[l].stride;
+ unsigned actual_stride = prsrc->slices[l].stride;
unsigned width = u_minify(texture->width0, l);
unsigned comp_stride = width * bytes_per_pixel;
}
}
+ /* In the hardware, array_size refers specifically to array textures,
+ * whereas in Gallium, it also covers cubemaps */
+
+ unsigned array_size = texture->array_size;
+
+ if (template->target == PIPE_TEXTURE_CUBE) {
+ /* TODO: Cubemap arrays */
+ assert(array_size == 6);
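+ /* A single cube is six Gallium layers but one hardware array element */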
+ array_size /= 6;
+ }
+
struct mali_texture_descriptor texture_descriptor = {
- .width = MALI_POSITIVE(texture->width0),
- .height = MALI_POSITIVE(texture->height0),
- .depth = MALI_POSITIVE(texture->depth0),
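+ /* The dimensions presumably describe the base level of the view, so
+ * minify to first_level */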
+ .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
+ .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
+ .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
+ .array_size = MALI_POSITIVE(array_size),
/* TODO: Decode */
.format = {
.swizzle = panfrost_translate_swizzle_4(desc->swizzle),
.format = format,
- .usage1 = 0x0,
- .is_not_cubemap = texture->target != PIPE_TEXTURE_CUBE,
+ .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
+ .type = panfrost_translate_texture_type(template->target),
.usage2 = usage2_layout
},
.swizzle = panfrost_translate_swizzle_4(user_swizzle)
};
- /* TODO: Other base levels require adjusting dimensions / level numbers / etc */
- assert (template->u.tex.first_level == 0);
-
- /* Disable mipmapping for now to avoid regressions while automipmapping
- * is being implemented. TODO: Remove me once automipmaps work */
-
- //texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level;
- texture_descriptor.nr_mipmap_levels = 0;
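+ /* The hardware level count appears to exclude the base level: a view
+ * of levels 2..5 yields 3 */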
+ texture_descriptor.nr_mipmap_levels = last_level - first_level;
so->hw = texture_descriptor;
assert(start_slot == 0);
- ctx->sampler_view_count[shader] = num_views;
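+ /* num_views can include trailing NULLs when state trackers unbind, so
+ * only count up to the last non-NULL view */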
+ unsigned new_nr = 0;
+ for (unsigned i = 0; i < num_views; ++i) {
+ if (views[i])
+ new_nr = i + 1;
+ }
+
+ ctx->sampler_view_count[shader] = new_nr;
memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));
ctx->dirty |= PAN_DIRTY_TEXTURES;
struct pipe_sampler_view *view)
{
pipe_resource_reference(&view->texture, NULL);
- free(view);
+ ralloc_free(view);
}
static void
{
struct panfrost_context *ctx = pan_context(pctx);
- /* Flush when switching away from an FBO, but not if the framebuffer
+ /* Flush when switching framebuffers, but not if the framebuffer
* state is being restored by u_blitter
*/
- if (!panfrost_is_scanout(ctx) && !ctx->blitter->running) {
- panfrost_flush(pctx, NULL, 0);
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ bool is_scanout = panfrost_is_scanout(ctx);
+ bool has_draws = job->last_job.gpu;
+
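+ /* Skip the flush while the wallpaper (framebuffer reload) blit is
+ * being drawn, and for scanout targets with no draws queued */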
+ if (!ctx->wallpaper_batch && (!is_scanout || has_draws)) {
+ panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
}
ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
continue;
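+ /* ~0 stands in for a not-yet-known vertex count; presumably this
+ * sizes the tiler structures for the worst case */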
if (ctx->require_sfbd)
- ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+ ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
else
- ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+ ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
panfrost_attach_vt_framebuffer(ctx);
-
- struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
- enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
-
- bool can_afbc = panfrost_format_supports_afbc(format);
- bool is_scanout = panfrost_is_scanout(ctx);
-
- if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
- panfrost_enable_afbc(ctx, tex, false);
-
- if (!is_scanout && !tex->bo->has_checksum)
- panfrost_enable_checksum(ctx, tex);
}
{
if (zb) {
if (ctx->require_sfbd)
- ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+ ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
else
- ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+ ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
panfrost_attach_vt_framebuffer(ctx);
-
- struct panfrost_resource *tex = pan_resource(zb->texture);
- bool can_afbc = panfrost_format_supports_afbc(zb->format);
- bool is_scanout = panfrost_is_scanout(ctx);
-
- if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
- panfrost_enable_afbc(ctx, tex, true);
}
}
}
const struct pipe_blend_state *blend)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
+ struct panfrost_blend_state *so = rzalloc(ctx, struct panfrost_blend_state);
so->base = *blend;
/* TODO: The following features are not yet implemented */
DBG("Deleting blend state leak blend shaders bytecode\n");
}
- free(blend);
+ ralloc_free(blend);
}
static void
if (panfrost->blitter)
util_blitter_destroy(panfrost->blitter);
- screen->driver->free_slab(screen, &panfrost->scratchpad);
- screen->driver->free_slab(screen, &panfrost->varying_mem);
- screen->driver->free_slab(screen, &panfrost->shaders);
- screen->driver->free_slab(screen, &panfrost->tiler_heap);
- screen->driver->free_slab(screen, &panfrost->misc_0);
+ if (panfrost->blitter_wallpaper)
+ util_blitter_destroy(panfrost->blitter_wallpaper);
+
+ panfrost_drm_free_slab(screen, &panfrost->scratchpad);
+ panfrost_drm_free_slab(screen, &panfrost->varying_mem);
+ panfrost_drm_free_slab(screen, &panfrost->shaders);
+ panfrost_drm_free_slab(screen, &panfrost->tiler_heap);
+ panfrost_drm_free_slab(screen, &panfrost->tiler_polygon_list);
+ panfrost_drm_free_slab(screen, &panfrost->tiler_dummy);
+
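+ /* Return each transient pool's initial slab entry to the screen's
+ * slab allocator */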
+ for (int i = 0; i < ARRAY_SIZE(panfrost->transient_pools); ++i) {
+ struct panfrost_memory_entry *entry;
+ entry = panfrost->transient_pools[i].entries[0];
+ pb_slab_free(&screen->slabs, (struct pb_slab_entry *)entry);
+ }
+
+ ralloc_free(pipe);
}
static struct pipe_query *
unsigned type,
unsigned index)
{
- struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);
+ struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
q->type = type;
q->index = index;
static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
- FREE(q);
+ ralloc_free(q);
}
static boolean
{
struct pipe_stream_output_target *target;
- target = CALLOC_STRUCT(pipe_stream_output_target);
+ target = rzalloc(pctx, struct pipe_stream_output_target);
if (!target)
return NULL;
struct pipe_stream_output_target *target)
{
pipe_resource_reference(&target->buffer, NULL);
- free(target);
+ ralloc_free(target);
}
static void
ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
}
- screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
- screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
- screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
- screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
- screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
-
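+ /* Long-lived GPU slabs; tiler_dummy is presumably a placeholder
+ * polygon list for when there is no geometry to tile */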
+ panfrost_drm_allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
+ panfrost_drm_allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
+ panfrost_drm_allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
+ panfrost_drm_allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
+ panfrost_drm_allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
+ panfrost_drm_allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
}
/* New context creation, which also does hardware initialisation since I don't
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
- struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
+ struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
struct panfrost_screen *pscreen = pan_screen(screen);
memset(ctx, 0, sizeof(*ctx));
struct pipe_context *gallium = (struct pipe_context *) ctx;
unsigned gpu_id;
- gpu_id = pscreen->driver->query_gpu_version(pscreen);
+ gpu_id = panfrost_drm_query_gpu_version(pscreen);
ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means T760 or less */
ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */
panfrost_resource_context_init(gallium);
- pscreen->driver->init_context(ctx);
+ panfrost_drm_init_context(ctx);
panfrost_setup_hardware(ctx);
ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);
ctx->blitter = util_blitter_create(gallium);
+ ctx->blitter_wallpaper = util_blitter_create(gallium);
+
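+ /* A dedicated wallpaper blitter lets the framebuffer-reload blit run
+ * even while ctx->blitter is mid-operation saving or restoring state */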
assert(ctx->blitter);
+ assert(ctx->blitter_wallpaper);
/* Prepare for render! */
panfrost_emit_tiler_payload(ctx);
panfrost_invalidate_frame(ctx);
panfrost_default_shader_backend(ctx);
- panfrost_generate_space_filler_indices();
return gallium;
}