panfrost_allocate_transient(struct panfrost_context *ctx, size_t sz)
{
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
/* Pad the size */
sz = ALIGN_POT(sz, ALIGNMENT);
bo = panfrost_create_slab(screen, &index);
}
- panfrost_job_add_bo(batch, bo);
+ panfrost_batch_add_bo(batch, bo);
/* Remember we created this */
util_dynarray_append(&batch->transient_indices, unsigned, index);
} else {
/* Create a new BO and reference it */
bo = panfrost_drm_create_bo(screen, ALIGN_POT(sz, 4096), 0);
- panfrost_job_add_bo(batch, bo);
+ panfrost_batch_add_bo(batch, bo);
/* Creating a BO adds a reference, and then the job adds a
* second one. So we need to pop back one reference */
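For reference, the ownership dance the comment describes nets out to a single reference held by the batch. A toy model of that pattern, using stand-in names (toy_bo, toy_ref, toy_unref) rather than the driver's API:

#include <assert.h>
#include <stdlib.h>

/* Toy stand-in for panfrost_bo and its reference counting. */
struct toy_bo { int refcnt; };

static struct toy_bo *toy_create(void)      /* models panfrost_drm_create_bo */
{
        struct toy_bo *bo = calloc(1, sizeof(*bo));
        bo->refcnt = 1;                     /* creation takes the first ref */
        return bo;
}

static void toy_ref(struct toy_bo *bo)   { bo->refcnt++; }

static void toy_unref(struct toy_bo *bo)
{
        if (--bo->refcnt == 0)
                free(bo);
}

int main(void)
{
        struct toy_bo *bo = toy_create();   /* refcnt == 1 */
        toy_ref(bo);                        /* batch_add_bo takes a second ref */
        toy_unref(bo);                      /* pop the creation ref back... */
        assert(bo->refcnt == 1);            /* ...so the batch is sole owner */
        toy_unref(bo);                      /* batch teardown frees the BO */
        return 0;
}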
panfrost_get_blend_for_context(struct panfrost_context *ctx, unsigned rti)
{
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
/* Grab the format, falling back gracefully if called invalidly (which
* has to happen for no-color-attachment FBOs, for instance) */
memcpy(final.shader.bo->cpu, shader->buffer, shader->size);
/* Pass BO ownership to job */
- panfrost_job_add_bo(job, final.shader.bo);
+ panfrost_batch_add_bo(batch, final.shader.bo);
panfrost_bo_unreference(ctx->base.screen, final.shader.bo);
if (shader->patch_index) {
memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
/* TODO: Do we want a special compute-only batch? */
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
/* Queue the job */
panfrost_scoreboard_queue_compute_job(batch, transfer);
unsigned vertex_count)
{
struct midgard_tiler_descriptor t = {};
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
t.hierarchy_mask =
panfrost_choose_hierarchy_mask(width, height, vertex_count);
/* Sanity check */
if (t.hierarchy_mask) {
- t.polygon_list = panfrost_job_get_polygon_list(batch,
- header_size + t.polygon_list_size);
+ t.polygon_list = panfrost_batch_get_polygon_list(batch,
+ header_size +
+ t.polygon_list_size);
/* Allow the entire tiler heap */
double depth, unsigned stencil)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- panfrost_job_clear(ctx, job, buffers, color, depth, stencil);
+ panfrost_batch_clear(ctx, batch, buffers, color, depth, stencil);
}
static mali_ptr
unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
/* Add the BO to the job so it's retained until the job is done. */
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
- panfrost_job_add_bo(job, rsrc->bo);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ panfrost_batch_add_bo(batch, rsrc->bo);
/* Add the usage flags in, since they can change across the CSO
* lifetime due to layout switches */
struct pipe_shader_buffer sb = ctx->ssbo[st][ssbo_id];
/* Compute address */
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct panfrost_bo *bo = pan_resource(sb.buffer)->bo;
- panfrost_job_add_bo(batch, bo);
+ panfrost_batch_add_bo(batch, bo);
/* Upload address and size as sysval */
uniform->du[0] = bo->gpu + sb.buffer_offset;
void
panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
{
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
panfrost_attach_vt_framebuffer(ctx);
if (with_vertex_data) {
- panfrost_emit_vertex_data(job);
+ panfrost_emit_vertex_data(batch);
/* Varyings emitted for -all- geometry */
unsigned total_count = ctx->padded_count * ctx->instance_count;
SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
}
- panfrost_job_set_requirements(ctx, job);
+ panfrost_batch_set_requirements(ctx, batch);
if (ctx->occlusion_query) {
ctx->payloads[PIPE_SHADER_FRAGMENT].gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT, false);
- panfrost_job_add_bo(job, variant->bo);
+ panfrost_batch_add_bo(batch, variant->bo);
#define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
* just... be faster :) */
if (!ctx->wallpaper_batch)
- panfrost_job_union_scissor(job, minx, miny, maxx, maxy);
+ panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
/* Upload */
if (!rasterizer_discard)
tiler = panfrost_vertex_tiler_job(ctx, true);
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
if (rasterizer_discard)
panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
static void
panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
struct pipe_fence_handle **fence,
- struct panfrost_job *job)
+ struct panfrost_batch *batch)
{
- panfrost_job_submit(ctx, job);
+ panfrost_batch_submit(ctx, batch);
/* If visual, we can stall a frame */
panfrost_drm_force_flush_fragment(ctx, fence);
ctx->last_fragment_flushed = false;
- ctx->last_job = job;
+ ctx->last_batch = batch;
/* If readback, flush now (hurts the pipelined performance) */
if (flush_immediate)
return;
/* Save the batch */
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
ctx->wallpaper_batch = batch;
unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);
if (damage_width && damage_height) {
- panfrost_job_intersection_scissor(batch, rsrc->damage.extent.minx,
- rsrc->damage.extent.miny,
- rsrc->damage.extent.maxx,
- rsrc->damage.extent.maxy);
+ panfrost_batch_intersection_scissor(batch,
+ rsrc->damage.extent.minx,
+ rsrc->damage.extent.miny,
+ rsrc->damage.extent.maxx,
+ rsrc->damage.extent.maxy);
}
/* FIXME: Looks like aligning on a tile is not enough, but
unsigned flags)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
/* Nothing to do! */
- if (!job->last_job.gpu && !job->clear) return;
+ if (!batch->last_job.gpu && !batch->clear) return;
- if (!job->clear && job->last_tiler.gpu)
+ if (!batch->clear && batch->last_tiler.gpu)
panfrost_draw_wallpaper(&ctx->base);
/* Whether to stall the pipeline for immediately correct results. Since
bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;
/* Submit the frame itself */
- panfrost_submit_frame(ctx, flush_immediate, fence, job);
+ panfrost_submit_frame(ctx, flush_immediate, fence, batch);
/* Prepare for the next frame */
panfrost_invalidate_frame(ctx);
struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
off_t offset = info->start * info->index_size;
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
if (!info->has_user_indices) {
/* Only resources can be directly mapped */
- panfrost_job_add_bo(batch, rsrc->bo);
+ panfrost_batch_add_bo(batch, rsrc->bo);
return rsrc->bo->gpu + offset;
} else {
/* Otherwise, we need to upload to transient memory */
/* Now that we have a guaranteed terminating path, find the job.
* Assignment commented out to prevent unused warning */
- /* struct panfrost_job *job = */ panfrost_get_job_for_fbo(ctx);
+ /* struct panfrost_batch *batch = */ panfrost_get_batch_for_fbo(ctx);
ctx->payloads[PIPE_SHADER_FRAGMENT].prefix.draw_mode = g2m_draw_mode(mode);
* state is being restored by u_blitter
*/
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
bool is_scanout = panfrost_is_scanout(ctx);
- bool has_draws = job->last_job.gpu;
+ bool has_draws = batch->last_job.gpu;
/* Bail out early when the current and new states are the same. */
if (util_framebuffer_state_equal(&ctx->pipe_framebuffer, fb))
/* Invalidate the FBO job cache since we've just been assigned a new
* FB state.
*/
- ctx->job = NULL;
+ ctx->batch = NULL;
util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
assert(ctx->blitter_wallpaper);
ctx->last_fragment_flushed = true;
- ctx->last_job = NULL;
+ ctx->last_batch = NULL;
/* Prepare for render! */
- panfrost_job_init(ctx);
+ panfrost_batch_init(ctx);
panfrost_emit_vertex_payload(ctx);
panfrost_emit_tiler_payload(ctx);
panfrost_invalidate_frame(ctx);
/* Compiler context */
struct midgard_screen compiler;
- /* Bound job and map of panfrost_job_key to jobs */
- struct panfrost_job *job;
- struct hash_table *jobs;
+ /* Bound job batch and map of panfrost_batch_key to job batches */
+ struct panfrost_batch *batch;
+ struct hash_table *batches;
- /* panfrost_resource -> panfrost_job */
+ /* panfrost_resource -> panfrost_batch */
struct hash_table *write_jobs;
* errors due to unsupported recursion */
struct blitter_context *blitter_wallpaper;
- struct panfrost_job *wallpaper_batch;
+ struct panfrost_batch *wallpaper_batch;
struct panfrost_blend_state *blend;
* still busy executing frame N-1. So hold a reference to
* yesterjob */
int last_fragment_flushed;
- struct panfrost_job *last_job;
+ struct panfrost_batch *last_batch;
};
/* Corresponds to the CSO */
panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i);
void
-panfrost_emit_vertex_data(struct panfrost_job *batch);
+panfrost_emit_vertex_data(struct panfrost_batch *batch);
struct pan_shift_odd {
unsigned shift;
}
static int
-panfrost_drm_submit_job(struct panfrost_context *ctx, u64 job_desc, int reqs)
+panfrost_drm_submit_batch(struct panfrost_context *ctx, u64 first_job_desc,
+ int reqs)
{
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_screen *screen = pan_screen(gallium->screen);
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct drm_panfrost_submit submit = {0,};
int *bo_handles, ret;
submit.out_sync = ctx->out_sync;
- submit.jc = job_desc;
+ submit.jc = first_job_desc;
submit.requirements = reqs;
- bo_handles = calloc(job->bos->entries, sizeof(*bo_handles));
+ bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
assert(bo_handles);
- set_foreach(job->bos, entry) {
+ set_foreach(batch->bos, entry) {
struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
assert(bo->gem_handle > 0);
bo_handles[submit.bo_handle_count++] = bo->gem_handle;
}
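The rest of this hunk is elided; judging from the surrounding code, it hands the gathered handles to the kernel. A sketch of that step, assuming libdrm's drmIoctl, the upstream DRM_IOCTL_PANFROST_SUBMIT ioctl, and the screen->fd field used elsewhere in the driver:

/* Publish the gathered handles to the submit ioctl. Per DRM convention,
 * bo_handles travels as a u64-encoded user pointer; the kernel copies the
 * array during the ioctl, so it can be freed immediately afterwards. */
submit.bo_handles = (u64)(uintptr_t)bo_handles;
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
free(bo_handles);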
int
-panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws)
+panfrost_drm_submit_vs_fs_batch(struct panfrost_context *ctx, bool has_draws)
{
int ret = 0;
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- panfrost_job_add_bo(job, ctx->scratchpad.bo);
- panfrost_job_add_bo(job, ctx->tiler_heap.bo);
- panfrost_job_add_bo(job, job->polygon_list);
+ panfrost_batch_add_bo(batch, ctx->scratchpad.bo);
+ panfrost_batch_add_bo(batch, ctx->tiler_heap.bo);
+ panfrost_batch_add_bo(batch, batch->polygon_list);
- if (job->first_job.gpu) {
- ret = panfrost_drm_submit_job(ctx, job->first_job.gpu, 0);
+ if (batch->first_job.gpu) {
+ ret = panfrost_drm_submit_batch(ctx, batch->first_job.gpu, 0);
assert(!ret);
}
- if (job->first_tiler.gpu || job->clear) {
- ret = panfrost_drm_submit_job(ctx, panfrost_fragment_job(ctx, has_draws), PANFROST_JD_REQ_FS);
+ if (batch->first_tiler.gpu || batch->clear) {
+ ret = panfrost_drm_submit_batch(ctx,
+ panfrost_fragment_job(ctx, has_draws),
+ PANFROST_JD_REQ_FS);
assert(!ret);
}
ctx->last_fragment_flushed = true;
/* The job finished up, so we're safe to clean it up now */
- panfrost_free_job(ctx, ctx->last_job);
+ panfrost_free_batch(ctx, ctx->last_batch);
}
if (fence) {
static void
panfrost_initialize_surface(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct pipe_surface *surf)
{
if (!surf)
rsrc->slices[level].initialized = true;
assert(rsrc->bo);
- panfrost_job_add_bo(batch, rsrc->bo);
+ panfrost_batch_add_bo(batch, rsrc->bo);
}
/* Generate a fragment job. This should be called once per frame. (According to
* Also, add the surfaces we're writing to to the batch */
struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
panfrost_initialize_surface(batch, fb->cbufs[i]);
.job_descriptor_size = 1
};
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
-
/* The passed tile coords can be out of range in some cases, so we need
* to clamp them to the framebuffer size to avoid a TILE_RANGE_FAULT.
* Theoretically we also need to clamp the coordinates positive, but we
* But that can't happen if any actual drawing occurs (beyond a
* wallpaper reload), so this is again irrelevant in practice. */
- job->maxx = MIN2(job->maxx, fb->width);
- job->maxy = MIN2(job->maxy, fb->height);
+ batch->maxx = MIN2(batch->maxx, fb->width);
+ batch->maxy = MIN2(batch->maxy, fb->height);
/* Rendering region must be at least 1x1; otherwise, there is nothing
* to do and the whole job chain should have been discarded. */
- assert(job->maxx > job->minx);
- assert(job->maxy > job->miny);
+ assert(batch->maxx > batch->minx);
+ assert(batch->maxy > batch->miny);
struct mali_payload_fragment payload = {
- .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(job->minx, job->miny),
- .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(job->maxx, job->maxy),
+ .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(batch->minx, batch->miny),
+ .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(batch->maxx, batch->maxy),
.framebuffer = framebuffer,
};
static unsigned
panfrost_vertex_instanced(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_resource *rsrc,
unsigned divisor,
union mali_attr *attrs,
}
void
-panfrost_emit_vertex_data(struct panfrost_job *batch)
+panfrost_emit_vertex_data(struct panfrost_batch *batch)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
unsigned chopped_addr = raw_addr - addr;
/* Add a dependency of the batch on the vertex buffer */
- panfrost_job_add_bo(batch, rsrc->bo);
+ panfrost_batch_add_bo(batch, rsrc->bo);
/* Set common fields */
attrs[k].elements = addr;
#include "util/u_format.h"
#include "util/u_pack_color.h"
-struct panfrost_job *
-panfrost_create_job(struct panfrost_context *ctx)
+struct panfrost_batch *
+panfrost_create_batch(struct panfrost_context *ctx)
{
- struct panfrost_job *job = rzalloc(ctx, struct panfrost_job);
+ struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
- job->ctx = ctx;
+ batch->ctx = ctx;
- job->bos = _mesa_set_create(job,
- _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ batch->bos = _mesa_set_create(batch,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
- job->minx = job->miny = ~0;
- job->maxx = job->maxy = 0;
- job->transient_offset = 0;
+ batch->minx = batch->miny = ~0;
+ batch->maxx = batch->maxy = 0;
+ batch->transient_offset = 0;
- util_dynarray_init(&job->headers, job);
- util_dynarray_init(&job->gpu_headers, job);
- util_dynarray_init(&job->transient_indices, job);
+ util_dynarray_init(&batch->headers, batch);
+ util_dynarray_init(&batch->gpu_headers, batch);
+ util_dynarray_init(&batch->transient_indices, batch);
- return job;
+ return batch;
}
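Everything allocated above is parented to the batch: rzalloc, _mesa_set_create, and util_dynarray_init all take the batch as their memory context, so the single ralloc_free in panfrost_free_batch tears the whole tree down. A self-contained model of that parent/child discipline (toy allocator, not Mesa's ralloc):

#include <stdlib.h>

/* Toy hierarchical allocator: freeing a parent frees its children,
 * the property panfrost_create_batch/panfrost_free_batch rely on. */
struct node { struct node *first_child, *next_sibling; };

static struct node *toy_alloc(struct node *parent, size_t payload)
{
        struct node *n = calloc(1, sizeof(*n) + payload);
        if (parent) {
                n->next_sibling = parent->first_child;
                parent->first_child = n;
        }
        return n;
}

static void toy_free(struct node *n)
{
        for (struct node *c = n->first_child; c;) {
                struct node *next = c->next_sibling;
                toy_free(c);
                c = next;
        }
        free(n);
}

int main(void)
{
        struct node *batch = toy_alloc(NULL, 64);
        toy_alloc(batch, 32);   /* e.g. the bos set */
        toy_alloc(batch, 32);   /* e.g. a dynarray's backing store */
        toy_free(batch);        /* one call releases the whole tree */
        return 0;
}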
void
-panfrost_free_job(struct panfrost_context *ctx, struct panfrost_job *job)
+panfrost_free_batch(struct panfrost_context *ctx, struct panfrost_batch *batch)
{
- if (!job)
+ if (!batch)
return;
- set_foreach(job->bos, entry) {
+ set_foreach(batch->bos, entry) {
struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
panfrost_bo_unreference(ctx->base.screen, bo);
}
struct panfrost_screen *screen = pan_screen(ctx->base.screen);
pthread_mutex_lock(&screen->transient_lock);
- util_dynarray_foreach(&job->transient_indices, unsigned, index) {
+ util_dynarray_foreach(&batch->transient_indices, unsigned, index) {
/* Mark it free */
BITSET_SET(screen->free_transient, *index);
}
pthread_mutex_unlock(&screen->transient_lock);
/* Unreference the polygon list */
- panfrost_bo_unreference(ctx->base.screen, job->polygon_list);
+ panfrost_bo_unreference(ctx->base.screen, batch->polygon_list);
- _mesa_hash_table_remove_key(ctx->jobs, &job->key);
+ _mesa_hash_table_remove_key(ctx->batches, &batch->key);
- if (ctx->job == job)
- ctx->job = NULL;
+ if (ctx->batch == batch)
+ ctx->batch = NULL;
- ralloc_free(job);
+ ralloc_free(batch);
}
-struct panfrost_job *
-panfrost_get_job(struct panfrost_context *ctx,
+struct panfrost_batch *
+panfrost_get_batch(struct panfrost_context *ctx,
struct pipe_surface **cbufs, struct pipe_surface *zsbuf)
{
/* Lookup the job first */
- struct panfrost_job_key key = {
+ struct panfrost_batch_key key = {
.cbufs = {
cbufs[0],
cbufs[1],
.zsbuf = zsbuf
};
- struct hash_entry *entry = _mesa_hash_table_search(ctx->jobs, &key);
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, &key);
if (entry)
return entry->data;
/* Otherwise, let's create a job */
- struct panfrost_job *job = panfrost_create_job(ctx);
+ struct panfrost_batch *batch = panfrost_create_batch(ctx);
/* Save the created job */
- memcpy(&job->key, &key, sizeof(key));
- _mesa_hash_table_insert(ctx->jobs, &job->key, job);
+ memcpy(&batch->key, &key, sizeof(key));
+ _mesa_hash_table_insert(ctx->batches, &batch->key, batch);
- return job;
+ return batch;
}
/* Get the job corresponding to the FBO we're currently rendering into */
-struct panfrost_job *
-panfrost_get_job_for_fbo(struct panfrost_context *ctx)
+struct panfrost_batch *
+panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
/* If we're wallpapering, we special case to workaround
* u_blitter abuse */
/* If we already began rendering, use that */
- if (ctx->job) {
- assert(ctx->job->key.zsbuf == ctx->pipe_framebuffer.zsbuf &&
- !memcmp(ctx->job->key.cbufs,
+ if (ctx->batch) {
+ assert(ctx->batch->key.zsbuf == ctx->pipe_framebuffer.zsbuf &&
+ !memcmp(ctx->batch->key.cbufs,
ctx->pipe_framebuffer.cbufs,
- sizeof(ctx->job->key.cbufs)));
- return ctx->job;
+ sizeof(ctx->batch->key.cbufs)));
+ return ctx->batch;
}
/* If not, look up the job */
struct pipe_surface **cbufs = ctx->pipe_framebuffer.cbufs;
struct pipe_surface *zsbuf = ctx->pipe_framebuffer.zsbuf;
- struct panfrost_job *job = panfrost_get_job(ctx, cbufs, zsbuf);
+ struct panfrost_batch *batch = panfrost_get_batch(ctx, cbufs, zsbuf);
/* Set this job as the current FBO job. Will be reset when updating the
* FB state and when submitting or releasing a job.
*/
- ctx->job = job;
- return job;
+ ctx->batch = batch;
+ return batch;
}
void
-panfrost_job_add_bo(struct panfrost_job *job, struct panfrost_bo *bo)
+panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo)
{
if (!bo)
return;
- if (_mesa_set_search(job->bos, bo))
+ if (_mesa_set_search(batch->bos, bo))
return;
panfrost_bo_reference(bo);
- _mesa_set_add(job->bos, bo);
+ _mesa_set_add(batch->bos, bo);
}
/* Returns the polygon list's GPU address if available, or otherwise allocates
* since we'll hit the BO cache and this is one-per-batch anyway. */
mali_ptr
-panfrost_job_get_polygon_list(struct panfrost_job *batch, unsigned size)
+panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
if (batch->polygon_list) {
assert(batch->polygon_list->size >= size);
struct hash_entry *entry = _mesa_hash_table_search(panfrost->write_jobs,
prsc);
if (entry) {
- struct panfrost_job *job = entry->data;
- panfrost_job_submit(panfrost, job);
+ struct panfrost_batch *batch = entry->data;
+ panfrost_batch_submit(panfrost, batch);
}
#endif
/* TODO stub */
}
void
-panfrost_job_submit(struct panfrost_context *ctx, struct panfrost_job *job)
+panfrost_batch_submit(struct panfrost_context *ctx, struct panfrost_batch *batch)
{
int ret;
- assert(job);
- panfrost_scoreboard_link_batch(job);
+ assert(batch);
+ panfrost_scoreboard_link_batch(batch);
- bool has_draws = job->last_job.gpu;
+ bool has_draws = batch->last_job.gpu;
- ret = panfrost_drm_submit_vs_fs_job(ctx, has_draws);
+ ret = panfrost_drm_submit_vs_fs_batch(ctx, has_draws);
if (ret)
- fprintf(stderr, "panfrost_job_submit failed: %d\n", ret);
+ fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
/* The job has been submitted, let's invalidate the current FBO job
* cache.
*/
- assert(!ctx->job || job == ctx->job);
- ctx->job = NULL;
+ assert(!ctx->batch || batch == ctx->batch);
+ ctx->batch = NULL;
- /* Remove the job from the ctx->jobs set so that future
- * panfrost_get_job() calls don't see it.
+ /* Remove the job from the ctx->batches set so that future
+ * panfrost_get_batch() calls don't see it.
* We must reset the job key to avoid removing another valid entry when
* the job is freed.
*/
- _mesa_hash_table_remove_key(ctx->jobs, &job->key);
- memset(&job->key, 0, sizeof(job->key));
+ _mesa_hash_table_remove_key(ctx->batches, &batch->key);
+ memset(&batch->key, 0, sizeof(batch->key));
}
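The memset of the key above is load-bearing: a submitted-but-not-yet-freed batch can coexist with a live batch keyed on the same FBO, and since removal compares raw key bytes, a stale key would match, and later delete, the live entry. A toy reproduction of the hazard (two-slot "table" standing in for _mesa_hash_table):

#include <assert.h>
#include <string.h>

struct key { void *cbufs[4]; void *zsbuf; };
struct entry { struct key *key; int live; };

/* Removes the first live entry whose key bytes match, mimicking
 * _mesa_hash_table_remove_key(). */
static void toy_remove(struct entry *tbl, int n, const struct key *k)
{
        for (int i = 0; i < n; i++)
                if (tbl[i].live && memcmp(tbl[i].key, k, sizeof(*k)) == 0) {
                        tbl[i].live = 0;
                        return;
                }
}

int main(void)
{
        void *fbo = (void *)0x1;
        struct key submitted = { .cbufs = { fbo } }; /* batch just submitted */
        struct key current   = { .cbufs = { fbo } }; /* new batch, same FBO  */
        struct entry tbl[2] = { { &current, 1 }, { 0 } };

        /* Without the reset, freeing the submitted batch would remove the
         * *current* batch's entry, because the keys compare byte-equal: */
        toy_remove(tbl, 2, &submitted);
        assert(!tbl[0].live);              /* wrong entry removed */

        /* With the reset, the stale key no longer matches anything: */
        tbl[0].live = 1;
        memset(&submitted, 0, sizeof(submitted));
        toy_remove(tbl, 2, &submitted);
        assert(tbl[0].live);               /* current batch survives */
        return 0;
}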
void
-panfrost_job_set_requirements(struct panfrost_context *ctx,
- struct panfrost_job *job)
+panfrost_batch_set_requirements(struct panfrost_context *ctx,
+ struct panfrost_batch *batch)
{
if (ctx->rasterizer && ctx->rasterizer->base.multisample)
- job->requirements |= PAN_REQ_MSAA;
+ batch->requirements |= PAN_REQ_MSAA;
if (ctx->depth_stencil && ctx->depth_stencil->depth.writemask)
- job->requirements |= PAN_REQ_DEPTH_WRITE;
+ batch->requirements |= PAN_REQ_DEPTH_WRITE;
}
/* Helper to smear a 32-bit color across 128-bit components */
}
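For the common 32-bit color formats, the smear mentioned above amounts to replicating one packed pixel into the four clear words the framebuffer descriptor carries. An illustrative sketch, assuming a simple 8:8:8:8 packing (pan_pack_color also handles other formats; this models only the replicated case):

#include <stdint.h>

/* Replicate one packed 32-bit pixel across the four 32-bit clear words
 * (clear_color_1..4 in the FBD), modeling the simple RGBA8 case. */
static void smear_rgba8(uint32_t packed, uint32_t out[4])
{
        for (int i = 0; i < 4; i++)
                out[i] = packed;
}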
void
-panfrost_job_clear(struct panfrost_context *ctx,
- struct panfrost_job *job,
- unsigned buffers,
- const union pipe_color_union *color,
- double depth, unsigned stencil)
+panfrost_batch_clear(struct panfrost_context *ctx,
+ struct panfrost_batch *batch,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil)
{
if (buffers & PIPE_CLEAR_COLOR) {
continue;
enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
- pan_pack_color(job->clear_color[i], color, format);
+ pan_pack_color(batch->clear_color[i], color, format);
}
}
if (buffers & PIPE_CLEAR_DEPTH) {
- job->clear_depth = depth;
+ batch->clear_depth = depth;
}
if (buffers & PIPE_CLEAR_STENCIL) {
- job->clear_stencil = stencil;
+ batch->clear_stencil = stencil;
}
- job->clear |= buffers;
+ batch->clear |= buffers;
/* Clearing affects the entire framebuffer (by definition -- this is
* the Gallium clear callback, which clears the whole framebuffer. If
* the scissor test were enabled from the GL side, the state tracker
* would emit a quad instead and we wouldn't go down this code path) */
- panfrost_job_union_scissor(job, 0, 0,
- ctx->pipe_framebuffer.width,
- ctx->pipe_framebuffer.height);
+ panfrost_batch_union_scissor(batch, 0, 0,
+ ctx->pipe_framebuffer.width,
+ ctx->pipe_framebuffer.height);
}
void
panfrost_flush_jobs_writing_resource(panfrost, prsc);
- hash_table_foreach(panfrost->jobs, entry) {
- struct panfrost_job *job = entry->data;
+ hash_table_foreach(panfrost->batches, entry) {
+ struct panfrost_batch *batch = entry->data;
- if (_mesa_set_search(job->bos, rsc->bo)) {
+ if (_mesa_set_search(batch->bos, rsc->bo)) {
printf("TODO: submit job for flush\n");
- //panfrost_job_submit(panfrost, job);
+ //panfrost_batch_submit(panfrost, batch);
continue;
}
}
}
static bool
-panfrost_job_compare(const void *a, const void *b)
+panfrost_batch_compare(const void *a, const void *b)
{
- return memcmp(a, b, sizeof(struct panfrost_job_key)) == 0;
+ return memcmp(a, b, sizeof(struct panfrost_batch_key)) == 0;
}
static uint32_t
-panfrost_job_hash(const void *key)
+panfrost_batch_hash(const void *key)
{
- return _mesa_hash_data(key, sizeof(struct panfrost_job_key));
+ return _mesa_hash_data(key, sizeof(struct panfrost_batch_key));
}
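Because equality is a memcmp and the hash runs over raw key bytes, any byte-stable hash works here. A stand-in for _mesa_hash_data using FNV-1a (the soundness of byte hashing rests on the key being plain pointers with no uninitialized padding, and on stale keys being zeroed as done in panfrost_batch_submit):

#include <stddef.h>
#include <stdint.h>

/* FNV-1a over raw bytes: a stand-in for _mesa_hash_data(). */
static uint32_t fnv1a(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t h = 2166136261u;
        for (size_t i = 0; i < len; i++) {
                h ^= p[i];
                h *= 16777619u;
        }
        return h;
}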
/* Given a new bounding rectangle (scissor), let the job cover the union of the
* new and old bounding rectangles */
void
-panfrost_job_union_scissor(struct panfrost_job *job,
- unsigned minx, unsigned miny,
- unsigned maxx, unsigned maxy)
+panfrost_batch_union_scissor(struct panfrost_batch *batch,
+ unsigned minx, unsigned miny,
+ unsigned maxx, unsigned maxy)
{
- job->minx = MIN2(job->minx, minx);
- job->miny = MIN2(job->miny, miny);
- job->maxx = MAX2(job->maxx, maxx);
- job->maxy = MAX2(job->maxy, maxy);
+ batch->minx = MIN2(batch->minx, minx);
+ batch->miny = MIN2(batch->miny, miny);
+ batch->maxx = MAX2(batch->maxx, maxx);
+ batch->maxy = MAX2(batch->maxy, maxy);
}
void
-panfrost_job_intersection_scissor(struct panfrost_job *job,
+panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
unsigned minx, unsigned miny,
unsigned maxx, unsigned maxy)
{
- job->minx = MAX2(job->minx, minx);
- job->miny = MAX2(job->miny, miny);
- job->maxx = MIN2(job->maxx, maxx);
- job->maxy = MIN2(job->maxy, maxy);
+ batch->minx = MAX2(batch->minx, minx);
+ batch->miny = MAX2(batch->miny, miny);
+ batch->maxx = MIN2(batch->maxx, maxx);
+ batch->maxy = MIN2(batch->maxy, maxy);
}
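The two helpers compose as expected: unions grow the box as draws accumulate, and the damage intersection clamps it back. A self-contained check of the min/max arithmetic (plain C, not driver code; MIN2/MAX2 redefined locally):

#include <assert.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

struct box { unsigned minx, miny, maxx, maxy; };

/* Union: grow the box. The driver seeds minx/miny with ~0 and maxx/maxy
 * with 0 (see panfrost_create_batch), so the first union adopts the
 * incoming rectangle unchanged. */
static void box_union(struct box *b, unsigned x0, unsigned y0,
                      unsigned x1, unsigned y1)
{
        b->minx = MIN2(b->minx, x0);
        b->miny = MIN2(b->miny, y0);
        b->maxx = MAX2(b->maxx, x1);
        b->maxy = MAX2(b->maxy, y1);
}

/* Intersection: shrink the box (used for the damage region). */
static void box_intersect(struct box *b, unsigned x0, unsigned y0,
                          unsigned x1, unsigned y1)
{
        b->minx = MAX2(b->minx, x0);
        b->miny = MAX2(b->miny, y0);
        b->maxx = MIN2(b->maxx, x1);
        b->maxy = MIN2(b->maxy, y1);
}

int main(void)
{
        struct box b = { ~0u, ~0u, 0, 0 };  /* fresh batch */
        box_union(&b, 10, 10, 50, 50);      /* first draw */
        box_union(&b, 40, 0, 80, 30);       /* second draw */
        assert(b.minx == 10 && b.miny == 0 && b.maxx == 80 && b.maxy == 50);
        box_intersect(&b, 20, 20, 60, 60);  /* damage clamp */
        assert(b.minx == 20 && b.miny == 20 && b.maxx == 60 && b.maxy == 50);
        return 0;
}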
void
-panfrost_job_init(struct panfrost_context *ctx)
+panfrost_batch_init(struct panfrost_context *ctx)
{
- ctx->jobs = _mesa_hash_table_create(ctx,
- panfrost_job_hash,
- panfrost_job_compare);
+ ctx->batches = _mesa_hash_table_create(ctx,
+ panfrost_batch_hash,
+ panfrost_batch_compare);
ctx->write_jobs = _mesa_hash_table_create(ctx,
_mesa_hash_pointer,
/* Used as a hash table key */
-struct panfrost_job_key {
+struct panfrost_batch_key {
struct pipe_surface *cbufs[4];
struct pipe_surface *zsbuf;
};
#define PAN_REQ_MSAA (1 << 0)
#define PAN_REQ_DEPTH_WRITE (1 << 1)
-/* A panfrost_job corresponds to a bound FBO we're rendering to,
+/* A panfrost_batch corresponds to a bound FBO we're rendering to,
* collecting over multiple draws. */
-struct panfrost_job {
+struct panfrost_batch {
struct panfrost_context *ctx;
- struct panfrost_job_key key;
+ struct panfrost_batch_key key;
/* Buffers cleared (PIPE_CLEAR_* bitmask) */
unsigned clear;
/* Functions for managing the above */
-struct panfrost_job *
-panfrost_create_job(struct panfrost_context *ctx);
+struct panfrost_batch *
+panfrost_create_batch(struct panfrost_context *ctx);
void
-panfrost_free_job(struct panfrost_context *ctx, struct panfrost_job *job);
+panfrost_free_batch(struct panfrost_context *ctx,
+ struct panfrost_batch *batch);
-struct panfrost_job *
-panfrost_get_job(struct panfrost_context *ctx,
- struct pipe_surface **cbufs, struct pipe_surface *zsbuf);
+struct panfrost_batch *
+panfrost_get_batch(struct panfrost_context *ctx,
+ struct pipe_surface **cbufs,
+ struct pipe_surface *zsbuf);
-struct panfrost_job *
-panfrost_get_job_for_fbo(struct panfrost_context *ctx);
+struct panfrost_batch *
+panfrost_get_batch_for_fbo(struct panfrost_context *ctx);
void
-panfrost_job_init(struct panfrost_context *ctx);
+panfrost_batch_init(struct panfrost_context *ctx);
void
-panfrost_job_add_bo(struct panfrost_job *job, struct panfrost_bo *bo);
+panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo);
void
panfrost_flush_jobs_writing_resource(struct panfrost_context *panfrost,
struct pipe_resource *prsc);
void
-panfrost_job_submit(struct panfrost_context *ctx, struct panfrost_job *job);
+panfrost_batch_submit(struct panfrost_context *ctx, struct panfrost_batch *batch);
void
-panfrost_job_set_requirements(struct panfrost_context *ctx,
- struct panfrost_job *job);
+panfrost_batch_set_requirements(struct panfrost_context *ctx,
+ struct panfrost_batch *batch);
mali_ptr
-panfrost_job_get_polygon_list(struct panfrost_job *batch, unsigned size);
+panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size);
void
-panfrost_job_clear(struct panfrost_context *ctx,
- struct panfrost_job *job,
- unsigned buffers,
- const union pipe_color_union *color,
- double depth, unsigned stencil);
+panfrost_batch_clear(struct panfrost_context *ctx,
+ struct panfrost_batch *batch,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth, unsigned stencil);
void
-panfrost_job_union_scissor(struct panfrost_job *job,
- unsigned minx, unsigned miny,
- unsigned maxx, unsigned maxy);
+panfrost_batch_union_scissor(struct panfrost_batch *batch,
+ unsigned minx, unsigned miny,
+ unsigned maxx, unsigned maxy);
void
-panfrost_job_intersection_scissor(struct panfrost_job *job,
- unsigned minx, unsigned miny,
- unsigned maxx, unsigned maxy);
+panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
+ unsigned minx, unsigned miny,
+ unsigned maxx, unsigned maxy);
/* Scoreboarding */
void
panfrost_scoreboard_queue_compute_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer job);
void
panfrost_scoreboard_queue_vertex_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
bool requires_tiling);
void
panfrost_scoreboard_queue_tiler_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer tiler);
void
panfrost_scoreboard_queue_fused_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
struct panfrost_transfer tiler);
void
panfrost_scoreboard_queue_fused_job_prepend(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
struct panfrost_transfer tiler);
void
-panfrost_scoreboard_link_batch(struct panfrost_job *batch);
+panfrost_scoreboard_link_batch(struct panfrost_batch *batch);
#endif
static void
panfrost_mfbd_clear(
- struct panfrost_job *job,
+ struct panfrost_batch *batch,
struct bifrost_framebuffer *fb,
struct bifrost_fb_extra *fbx,
struct bifrost_render_target *rts,
unsigned rt_count)
{
for (unsigned i = 0; i < rt_count; ++i) {
- if (!(job->clear & (PIPE_CLEAR_COLOR0 << i)))
+ if (!(batch->clear & (PIPE_CLEAR_COLOR0 << i)))
continue;
- rts[i].clear_color_1 = job->clear_color[i][0];
- rts[i].clear_color_2 = job->clear_color[i][1];
- rts[i].clear_color_3 = job->clear_color[i][2];
- rts[i].clear_color_4 = job->clear_color[i][3];
+ rts[i].clear_color_1 = batch->clear_color[i][0];
+ rts[i].clear_color_2 = batch->clear_color[i][1];
+ rts[i].clear_color_3 = batch->clear_color[i][2];
+ rts[i].clear_color_4 = batch->clear_color[i][3];
}
- if (job->clear & PIPE_CLEAR_DEPTH) {
- fb->clear_depth = job->clear_depth;
+ if (batch->clear & PIPE_CLEAR_DEPTH) {
+ fb->clear_depth = batch->clear_depth;
}
- if (job->clear & PIPE_CLEAR_STENCIL) {
- fb->clear_stencil = job->clear_stencil;
+ if (batch->clear & PIPE_CLEAR_STENCIL) {
+ fb->clear_stencil = batch->clear_stencil;
}
}
mali_ptr
panfrost_mfbd_fragment(struct panfrost_context *ctx, bool has_draws)
{
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct bifrost_framebuffer fb = panfrost_emit_mfbd(ctx, has_draws);
struct bifrost_fb_extra fbx = {};
fb.mfbd_flags = 0x100;
/* TODO: MRT clear */
- panfrost_mfbd_clear(job, &fb, &fbx, rts, fb.rt_count_2);
+ panfrost_mfbd_clear(batch, &fb, &fbx, rts, fb.rt_count_2);
/* Upload either the render target or a dummy GL_NONE target */
* can safely ignore it. */
if (panfrost_is_scanout(ctx)) {
- job->requirements &= ~PAN_REQ_DEPTH_WRITE;
+ batch->requirements &= ~PAN_REQ_DEPTH_WRITE;
}
/* Actualize the requirements */
- if (job->requirements & PAN_REQ_MSAA) {
+ if (batch->requirements & PAN_REQ_MSAA) {
rts[0].format.flags |= MALI_MFBD_FORMAT_MSAA;
/* XXX */
fb.rt_count_2 = 4;
}
- if (job->requirements & PAN_REQ_DEPTH_WRITE)
+ if (batch->requirements & PAN_REQ_DEPTH_WRITE)
fb.mfbd_flags |= MALI_MFBD_DEPTH_WRITE;
/* Checksumming only works with a single render target */
* reorder-type optimizations in place. But for now prioritize
* correctness. */
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
- bool has_draws = job->last_job.gpu;
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ bool has_draws = batch->last_job.gpu;
if (has_draws)
panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
static void
panfrost_assign_index(
- struct panfrost_job *job,
+ struct panfrost_batch *batch,
struct panfrost_transfer transfer)
{
/* Assign the index */
- unsigned index = ++job->job_index;
+ unsigned index = ++batch->job_index;
job_descriptor_header(transfer)->job_index = index;
}
static void
panfrost_scoreboard_queue_job_internal(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer job)
{
panfrost_assign_index(batch, job);
void
panfrost_scoreboard_queue_compute_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer job)
{
panfrost_scoreboard_queue_job_internal(batch, job);
void
panfrost_scoreboard_queue_vertex_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
bool requires_tiling)
{
void
panfrost_scoreboard_queue_tiler_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer tiler)
{
panfrost_scoreboard_queue_compute_job(batch, tiler);
void
panfrost_scoreboard_queue_fused_job(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
struct panfrost_transfer tiler)
{
void
panfrost_scoreboard_queue_fused_job_prepend(
- struct panfrost_job *batch,
+ struct panfrost_batch *batch,
struct panfrost_transfer vertex,
struct panfrost_transfer tiler)
{
* linked to the first vertex job feeding into tiling. */
static void
-panfrost_scoreboard_set_value(struct panfrost_job *batch)
+panfrost_scoreboard_set_value(struct panfrost_batch *batch)
{
/* Check if we even need tiling */
if (!batch->last_tiler.gpu)
* regardless of size. */
struct panfrost_context *ctx = batch->ctx;
- mali_ptr polygon_list = panfrost_job_get_polygon_list(batch, 0);
+ mali_ptr polygon_list = panfrost_batch_get_polygon_list(batch, 0);
struct panfrost_transfer job =
panfrost_set_value_job(ctx, polygon_list);
mali_ptr, count))
void
-panfrost_scoreboard_link_batch(struct panfrost_job *batch)
+panfrost_scoreboard_link_batch(struct panfrost_batch *batch)
{
/* Finalize the batch */
panfrost_scoreboard_set_value(batch);
int
panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo);
int
-panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws);
+panfrost_drm_submit_vs_fs_batch(struct panfrost_context *ctx, bool has_draws);
void
panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
struct pipe_fence_handle **fence);
static void
panfrost_sfbd_clear(
- struct panfrost_job *job,
+ struct panfrost_batch *batch,
struct mali_single_framebuffer *sfbd)
{
- if (job->clear & PIPE_CLEAR_COLOR) {
- sfbd->clear_color_1 = job->clear_color[0][0];
- sfbd->clear_color_2 = job->clear_color[0][1];
- sfbd->clear_color_3 = job->clear_color[0][2];
- sfbd->clear_color_4 = job->clear_color[0][3];
+ if (batch->clear & PIPE_CLEAR_COLOR) {
+ sfbd->clear_color_1 = batch->clear_color[0][0];
+ sfbd->clear_color_2 = batch->clear_color[0][1];
+ sfbd->clear_color_3 = batch->clear_color[0][2];
+ sfbd->clear_color_4 = batch->clear_color[0][3];
}
- if (job->clear & PIPE_CLEAR_DEPTH) {
- sfbd->clear_depth_1 = job->clear_depth;
- sfbd->clear_depth_2 = job->clear_depth;
- sfbd->clear_depth_3 = job->clear_depth;
- sfbd->clear_depth_4 = job->clear_depth;
+ if (batch->clear & PIPE_CLEAR_DEPTH) {
+ sfbd->clear_depth_1 = batch->clear_depth;
+ sfbd->clear_depth_2 = batch->clear_depth;
+ sfbd->clear_depth_3 = batch->clear_depth;
+ sfbd->clear_depth_4 = batch->clear_depth;
}
- if (job->clear & PIPE_CLEAR_STENCIL) {
- sfbd->clear_stencil = job->clear_stencil;
+ if (batch->clear & PIPE_CLEAR_STENCIL) {
+ sfbd->clear_stencil = batch->clear_stencil;
}
/* Set flags based on what has been cleared, for the SFBD case */
/* XXX: What do these flags mean? */
int clear_flags = 0x101100;
- if (!(job->clear & ~(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
+ if (!(batch->clear & ~(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
/* On a tiler like this, it's fastest to clear all three buffers at once */
clear_flags |= MALI_CLEAR_FAST;
} else {
clear_flags |= MALI_CLEAR_SLOW;
- if (job->clear & PIPE_CLEAR_STENCIL)
+ if (batch->clear & PIPE_CLEAR_STENCIL)
clear_flags |= MALI_CLEAR_SLOW_STENCIL;
}
mali_ptr
panfrost_sfbd_fragment(struct panfrost_context *ctx, bool has_draws)
{
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct mali_single_framebuffer fb = panfrost_emit_sfbd(ctx, has_draws);
- panfrost_sfbd_clear(job, &fb);
+ panfrost_sfbd_clear(batch, &fb);
/* SFBD does not support MRT natively; sanity check */
assert(ctx->pipe_framebuffer.nr_cbufs == 1);
if (ctx->pipe_framebuffer.zsbuf)
panfrost_sfbd_set_zsbuf(&fb, ctx->pipe_framebuffer.zsbuf);
- if (job->requirements & PAN_REQ_MSAA)
+ if (batch->requirements & PAN_REQ_MSAA)
fb.format |= MALI_FRAMEBUFFER_MSAA_A | MALI_FRAMEBUFFER_MSAA_B;
return panfrost_upload_transient(ctx, &fb, sizeof(fb)) | MALI_SFBD;
slot->size = MIN2(max_size, expected_size);
/* Grab the BO and bind it to the batch */
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
- panfrost_job_add_bo(batch, bo);
+ panfrost_batch_add_bo(batch, bo);
mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
slot->elements = addr;