#include "pan_context.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_pack_color.h"
+#include "util/rounding.h"
#include "pan_util.h"
#include "pandecode/decode.h"
+#include "panfrost-quirks.h"
/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
* and build a proper dependency graph such that batches can be pipelined for
pipe_reference_init(&fence->reference, 1);
fence->ctx = batch->ctx;
fence->batch = batch;
- ret = drmSyncobjCreate(pan_screen(batch->ctx->base.screen)->fd, 0,
+ ret = drmSyncobjCreate(pan_device(batch->ctx->base.screen)->fd, 0,
&fence->syncobj);
assert(!ret);
static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
- drmSyncobjDestroy(pan_screen(fence->ctx->base.screen)->fd,
+ drmSyncobjDestroy(pan_device(fence->ctx->base.screen)->fd,
fence->syncobj);
ralloc_free(fence);
}
batch->maxx = batch->maxy = 0;
batch->transient_offset = 0;
- util_dynarray_init(&batch->headers, batch);
- util_dynarray_init(&batch->gpu_headers, batch);
- util_dynarray_init(&batch->dependencies, batch);
batch->out_sync = panfrost_create_batch_fence(batch);
util_copy_framebuffer_state(&batch->key, key);
}
}
-#ifndef NDEBUG
+#ifdef PAN_BATCH_DEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
struct panfrost_context *ctx = batch->ctx;
if (!batch)
return;
+#ifdef PAN_BATCH_DEBUG
assert(panfrost_batch_is_frozen(batch));
+#endif
hash_table_foreach(batch->bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
ralloc_free(batch);
}
-#ifndef NDEBUG
+#ifdef PAN_BATCH_DEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
struct panfrost_batch *batch)
return;
}
+#ifdef PAN_BATCH_DEBUG
/* Make sure the dependency graph is acyclic. */
assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
+#endif
panfrost_batch_fence_reference(newdep);
util_dynarray_append(&batch->dependencies,
return batch;
}
+struct panfrost_batch *
+panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
+{
+ struct panfrost_batch *batch;
+
+ batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
+
+ /* If the batch has no draw/clear queued, return it directly.
+ * Note that it's perfectly fine to re-use a batch with an
+ * existing clear: we'll just update it with the new clear request.
+ */
+ if (!batch->first_job)
+ return batch;
+
+ /* Otherwise, we need to freeze the existing one and instantiate a new
+ * one.
+ */
+ panfrost_freeze_batch(batch);
+ return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
+}
+
static bool
panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
{
if (fence->batch)
return false;
- int ret = drmSyncobjWait(pan_screen(fence->ctx->base.screen)->fd,
+ int ret = drmSyncobjWait(pan_device(fence->ctx->base.screen)->fd,
&fence->syncobj, 1, 0, 0, NULL);
/* Cache whether the fence was signaled */
access->writer = NULL;
}
- unsigned nreaders = 0;
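+ /* Walk the readers once, dropping fences that have signaled and
+ * compacting the surviving pointers toward the front so the
+ * dynarray can simply be resized afterwards.
+ */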
+ struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
+ struct panfrost_batch_fence **new_readers = readers_array;
+
util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
reader) {
if (!(*reader))
panfrost_batch_fence_unreference(*reader);
*reader = NULL;
} else {
- nreaders++;
+ /* Build a new array of only unsignaled fences in-place */
+ *(new_readers++) = *reader;
}
}
- if (!nreaders)
- util_dynarray_clear(&access->readers);
+ if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
+ new_readers - readers_array) &&
+ new_readers != readers_array)
+ unreachable("Invalid dynarray access->readers");
}
/* Collect signaled fences to keep the kernel-side syncobj-map small. The
panfrost_bo_access_gc_fences(ctx, access, entry->key);
if (!util_dynarray_num_elements(&access->readers,
struct panfrost_batch_fence *) &&
- !access->writer)
+ !access->writer) {
+ ralloc_free(access);
_mesa_hash_table_remove(ctx->accessed_bos, entry);
+ }
}
}
-#ifndef NDEBUG
+#ifdef PAN_BATCH_DEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
struct panfrost_bo_access *access)
/* We already accessed this BO before, so we should already be
* in the reader array.
*/
+#ifdef PAN_BATCH_DEBUG
if (already_accessed) {
assert(panfrost_batch_in_readers(batch, access));
return;
}
+#endif
/* Previous access was a read and we want to read this BO.
* Add ourselves to the readers array and add a dependency on
panfrost_batch_update_bo_access(batch, bo, flags, old_flags != 0);
}
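+/* Add a resource's backing BOs to the batch: the main BO, any per-level
+ * checksum (CRC) BOs, and the separate stencil BO when the resource has
+ * one.
+ */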
+static void
+panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
+ struct panfrost_resource *rsrc,
+ uint32_t flags)
+{
+ panfrost_batch_add_bo(batch, rsrc->bo, flags);
+
+ for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
+ if (rsrc->slices[i].checksum_bo)
+ panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);
+
+ if (rsrc->separate_stencil)
+ panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
+}
+
void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
- panfrost_batch_add_bo(batch, rsrc->bo, flags);
+ panfrost_batch_add_resource_bos(batch, rsrc, flags);
}
if (batch->key.zsbuf) {
struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
- panfrost_batch_add_bo(batch, rsrc->bo, flags);
+ panfrost_batch_add_resource_bos(batch, rsrc, flags);
}
}
{
struct panfrost_bo *bo;
- bo = panfrost_bo_create(pan_screen(batch->ctx->base.screen), size,
+ bo = pan_bo_create(pan_device(batch->ctx->base.screen), size,
create_flags);
panfrost_batch_add_bo(batch, bo, access_flags);
/* panfrost_batch_add_bo() has retained a reference and
- * panfrost_bo_create() initialize the refcnt to 1, so let's
+ * pan_bo_create() initializes the refcnt to 1, so let's
* unreference the BO here so it gets released when the batch is
* destroyed (unless it's retained by someone else in the meantime).
*/
assert(batch->polygon_list->size >= size);
} else {
/* Create the BO as invisible, as there's no reason to map */
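+ /* Round the size up to a power of two, presumably to make the
+ * resulting BOs friendlier to the BO cache.
+ */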
+ size = util_next_power_of_two(size);
batch->polygon_list = panfrost_batch_create_bo(batch, size,
PAN_BO_INVISIBLE,
}
struct panfrost_bo *
-panfrost_batch_get_scratchpad(struct panfrost_batch *batch)
+panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
+ unsigned shift,
+ unsigned thread_tls_alloc,
+ unsigned core_count)
{
- if (batch->scratchpad)
- return batch->scratchpad;
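+ /* The scratchpad backs per-thread stack/TLS storage; its total size
+ * is derived from the stack shift, the per-thread TLS allocation and
+ * the core count.
+ */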
+ unsigned size = panfrost_get_total_stack_size(shift,
+ thread_tls_alloc,
+ core_count);
+
+ if (batch->scratchpad) {
+ assert(batch->scratchpad->size >= size);
+ } else {
+ batch->scratchpad = panfrost_batch_create_bo(batch, size,
+ PAN_BO_INVISIBLE,
+ PAN_BO_ACCESS_PRIVATE |
+ PAN_BO_ACCESS_RW |
+ PAN_BO_ACCESS_VERTEX_TILER |
+ PAN_BO_ACCESS_FRAGMENT);
+ }
- batch->scratchpad = panfrost_batch_create_bo(batch, 64 * 4 * 4096,
- PAN_BO_INVISIBLE,
- PAN_BO_ACCESS_PRIVATE |
- PAN_BO_ACCESS_RW |
- PAN_BO_ACCESS_VERTEX_TILER |
- PAN_BO_ACCESS_FRAGMENT);
- assert(batch->scratchpad);
return batch->scratchpad;
}
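+/* Lazily allocate a BO for shared (workgroup) memory. Once created it is
+ * reused for the rest of the batch, so later requests must not exceed the
+ * original allocation.
+ */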
+struct panfrost_bo *
+panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
+ unsigned size,
+ unsigned workgroup_count)
+{
+ if (batch->shared_memory) {
+ assert(batch->shared_memory->size >= size);
+ } else {
+ batch->shared_memory = panfrost_batch_create_bo(batch, size,
+ PAN_BO_INVISIBLE,
+ PAN_BO_ACCESS_PRIVATE |
+ PAN_BO_ACCESS_RW |
+ PAN_BO_ACCESS_VERTEX_TILER);
+ }
+
+ return batch->shared_memory;
+}
+
struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
return batch->tiler_heap;
}
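+/* Build and upload the Bifrost tiler descriptors (heap metadata plus
+ * tiler metadata) on the first draw that has vertices, then cache the
+ * resulting GPU pointer for the rest of the batch.
+ */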
+mali_ptr
+panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
+{
+ if (!vertex_count)
+ return 0;
+
+ if (batch->tiler_meta)
+ return batch->tiler_meta;
+
+ struct panfrost_bo *tiler_heap;
+ tiler_heap = panfrost_batch_get_tiler_heap(batch);
+
+ struct bifrost_tiler_heap_meta tiler_heap_meta = {
+ .heap_size = tiler_heap->size,
+ .tiler_heap_start = tiler_heap->gpu,
+ .tiler_heap_free = tiler_heap->gpu,
+ .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
+ .unk1 = 0x1,
+ .unk7e007e = 0x7e007e,
+ };
+
+ struct bifrost_tiler_meta tiler_meta = {
+ .hierarchy_mask = 0x28,
+ .flags = 0x0,
+ .width = MALI_POSITIVE(batch->key.width),
+ .height = MALI_POSITIVE(batch->key.height),
+ .tiler_heap_meta = panfrost_upload_transient(batch, &tiler_heap_meta, sizeof(tiler_heap_meta)),
+ };
+
+ batch->tiler_meta = panfrost_upload_transient(batch, &tiler_meta, sizeof(tiler_meta));
+ return batch->tiler_meta;
+}
+
struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
+ struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
+
+ uint32_t create_flags = 0;
+
if (batch->tiler_dummy)
return batch->tiler_dummy;
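+ /* The dummy tiler BO only needs to be CPU-mappable on GPUs without
+ * hierarchical tiling (MIDGARD_NO_HIER_TILING); keep it invisible
+ * otherwise.
+ */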
+ if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
+ create_flags = PAN_BO_INVISIBLE;
+
batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
- PAN_BO_INVISIBLE,
+ create_flags,
PAN_BO_ACCESS_PRIVATE |
PAN_BO_ACCESS_RW |
PAN_BO_ACCESS_VERTEX_TILER |
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
+ /* Color 0 is cleared, no need to draw the wallpaper.
+ * TODO: MRT wallpapers.
+ */
+ if (batch->clear & PIPE_CLEAR_COLOR0)
+ return;
+
/* Nothing to reload? TODO: MRT wallpapers */
if (batch->key.cbufs[0] == NULL)
return;
+ /* No draw calls, and no clear on the depth/stencil bufs.
+ * Drawing the wallpaper would be useless.
+ */
+ if (!batch->tiler_dep &&
+ !(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
+ return;
+
/* Check if the buffer has any content on it worth preserving */
struct pipe_surface *surf = batch->key.cbufs[0];
damage.maxx = MIN2(batch->maxx,
rsrc->damage.biggest_rect.x +
rsrc->damage.biggest_rect.width);
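+ /* Clamp so the damage rectangle never ends up inverted when the
+ * recorded rect lies entirely outside the batch bounds.
+ */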
+ damage.maxx = MAX2(damage.maxx, damage.minx);
damage.maxy = MIN2(batch->maxy,
rsrc->damage.biggest_rect.y +
rsrc->damage.biggest_rect.height);
+ damage.maxy = MAX2(damage.maxy, damage.miny);
/* One damage rectangle means we can end up with at most 4 reload
* regions:
{
struct panfrost_context *ctx = batch->ctx;
struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
+ struct panfrost_device *dev = pan_device(gallium->screen);
struct drm_panfrost_submit submit = {0,};
uint32_t *bo_handles, *in_syncs = NULL;
bool is_fragment_shader;
int ret;
- is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->first_job.gpu;
+ is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->first_job;
if (is_fragment_shader)
submit.in_sync_count = 1;
else
}
submit.bo_handles = (u64) (uintptr_t) bo_handles;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
+ ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
free(bo_handles);
free(in_syncs);
if (ret) {
- fprintf(stderr, "Error submitting: %m\n");
+ DBG("Error submitting: %m\n");
return errno;
}
/* Trace the job if we're doing that */
- if (pan_debug & PAN_DBG_TRACE) {
+ if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
/* Wait so we can get errors reported back */
- drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
+ drmSyncobjWait(dev->fd, &batch->out_sync->syncobj, 1,
INT64_MAX, 0, NULL);
- pandecode_jc(submit.jc, FALSE);
+
+ /* Trace gets priority over sync */
+ bool minimal = !(pan_debug & PAN_DBG_TRACE);
+ pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
}
return 0;
static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch)
{
- bool has_draws = batch->first_job.gpu;
+ bool has_draws = batch->first_job;
int ret = 0;
if (has_draws) {
- ret = panfrost_batch_submit_ioctl(batch, batch->first_job.gpu, 0);
+ ret = panfrost_batch_submit_ioctl(batch, batch->first_job, 0);
assert(!ret);
}
- if (batch->first_tiler.gpu || batch->clear) {
+ if (batch->tiler_dep || batch->clear) {
mali_ptr fragjob = panfrost_fragment_job(batch, has_draws);
-
ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS);
assert(!ret);
}
panfrost_batch_submit((*dep)->batch);
}
- struct panfrost_context *ctx = batch->ctx;
int ret;
/* Nothing to do! */
- if (!batch->last_job.gpu && !batch->clear) {
+ if (!batch->first_job && !batch->clear) {
/* Mark the fence as signaled so the fence logic does not try
* to wait on it.
*/
goto out;
}
- if (!batch->clear && batch->last_tiler.gpu)
- panfrost_batch_draw_wallpaper(batch);
+ panfrost_batch_draw_wallpaper(batch);
+
+ /* Now that all draws are in, we can finally prepare the
+ * FBD for the batch */
- panfrost_scoreboard_link_batch(batch);
+ if (batch->framebuffer.gpu && batch->first_job) {
+ struct panfrost_context *ctx = batch->ctx;
+ struct pipe_context *gallium = (struct pipe_context *) ctx;
+ struct panfrost_device *dev = pan_device(gallium->screen);
+
+ if (dev->quirks & MIDGARD_SFBD)
+ panfrost_attach_sfbd(batch, ~0);
+ else
+ panfrost_attach_mfbd(batch, ~0);
+ }
+
+ panfrost_scoreboard_initialize_tiler(batch);
ret = panfrost_batch_submit_jobs(batch);
if (ret)
- fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
+ DBG("panfrost_batch_submit failed: %d\n", ret);
+
+ /* We must reset the damage info of our render targets here even
+ * though a damage reset normally happens when the DRI layer swaps
+ * buffers. That's because there can be implicit flushes the GL
+ * app is not aware of, and those might impact the damage region: if
+ * part of the damaged portion is drawn during those implicit flushes,
+ * those areas have to be reloaded before the next draws are pushed.
+ * Since the driver can't easily know what was modified by the draws
+ * it flushed, the easiest solution is to reload everything.
+ */
+ for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
+ struct panfrost_resource *res;
-out:
- panfrost_freeze_batch(batch);
+ if (!batch->key.cbufs[i])
+ continue;
- /* We always stall the pipeline for correct results since pipelined
- * rendering is quite broken right now (to be fixed by the panfrost_job
- * refactor, just take the perf hit for correctness)
- */
- if (!batch->out_sync->signaled)
- drmSyncobjWait(pan_screen(ctx->base.screen)->fd,
- &batch->out_sync->syncobj, 1, INT64_MAX, 0,
- NULL);
+ res = pan_resource(batch->key.cbufs[i]->texture);
+ panfrost_resource_reset_damage(res);
+ }
+out:
+ panfrost_freeze_batch(batch);
panfrost_free_batch(batch);
-
}
void
if (!wait)
return;
- drmSyncobjWait(pan_screen(ctx->base.screen)->fd,
+ drmSyncobjWait(pan_device(ctx->base.screen)->fd,
util_dynarray_begin(&syncobjs),
util_dynarray_num_elements(&syncobjs, uint32_t),
INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);
util_dynarray_fini(&syncobjs);
}
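+/* Check whether a BO is referenced, as reader or writer, by a batch that
+ * has not been submitted yet.
+ */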
+bool
+panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
+ const struct panfrost_bo *bo)
+{
+ struct panfrost_bo_access *access;
+ struct hash_entry *hentry;
+
+ hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
+ access = hentry ? hentry->data : NULL;
+ if (!access)
+ return false;
+
+ if (access->writer && access->writer->batch)
+ return true;
+
+ util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
+ reader) {
+ if (*reader && (*reader)->batch)
+ return true;
+ }
+
+ return false;
+}
+
void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
struct panfrost_bo *bo,
batch->requirements |= PAN_REQ_DEPTH_WRITE;
}
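+/* Grow the batch's stack size so it covers the largest stack requirement
+ * of any currently bound shader stage.
+ */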
+void
+panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
+{
+ struct panfrost_context *ctx = batch->ctx;
+
+ for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
+ struct panfrost_shader_state *ss;
+
+ ss = panfrost_get_shader_state(ctx, i);
+ if (!ss)
+ continue;
+
+ batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
+ }
+}
+
/* Helper to smear a 32-bit color across 128-bit components */
static void
if (util_format_is_rgba8_variant(desc)) {
pan_pack_color_32(packed,
- (float_to_ubyte(clear_alpha) << 24) |
- (float_to_ubyte(color->f[2]) << 16) |
- (float_to_ubyte(color->f[1]) << 8) |
- (float_to_ubyte(color->f[0]) << 0));
+ ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
+ ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
+ ((uint32_t) float_to_ubyte(color->f[1]) << 8) |
+ ((uint32_t) float_to_ubyte(color->f[0]) << 0));
} else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
/* First, we convert the components to R5, G6, B5 separately */
- unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
- unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
- unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;
+ unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
+ unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
+ unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
/* Then we pack into a sparse u32. TODO: Why these shifts? */
pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
} else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
- /* We scale the components against 0xF0 (=240.0), rather than 0xFF */
- unsigned r4 = CLAMP(color->f[0], 0.0, 1.0) * 240.0;
- unsigned g4 = CLAMP(color->f[1], 0.0, 1.0) * 240.0;
- unsigned b4 = CLAMP(color->f[2], 0.0, 1.0) * 240.0;
- unsigned a4 = CLAMP(clear_alpha, 0.0, 1.0) * 240.0;
+ /* Convert to 4-bits */
+ unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
+ unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
+ unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
+ unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);
/* Pack on *byte* intervals */
- pan_pack_color_32(packed, (a4 << 24) | (b4 << 16) | (g4 << 8) | r4);
+ pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
} else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
/* Scale as expected but shift oddly */
- unsigned r5 = round(CLAMP(color->f[0], 0.0, 1.0)) * 31.0;
- unsigned g5 = round(CLAMP(color->f[1], 0.0, 1.0)) * 31.0;
- unsigned b5 = round(CLAMP(color->f[2], 0.0, 1.0)) * 31.0;
- unsigned a1 = round(CLAMP(clear_alpha, 0.0, 1.0)) * 1.0;
+ unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
+ unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
+ unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
+ unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);
pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
} else {
- /* Try Gallium's generic default path. Doesn't work for all
- * formats but it's a good guess. */
-
- union util_color out;
-
- if (util_format_is_pure_integer(format)) {
- memcpy(out.ui, color->ui, 16);
- } else {
- util_pack_color(color->f, format, &out);
- }
+ /* Otherwise, pack via the generic path and replicate the result */
+ union util_color out = { 0 };
unsigned size = util_format_get_blocksize(format);
+ util_pack_color(color->f, format, &out);
+
if (size == 1) {
unsigned b = out.ui[0];
unsigned s = b | (b << 8);
pan_pack_color_32(packed, s | (s << 16));
} else if (size == 2)
pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
- else if (size == 4)
+ else if (size == 3 || size == 4)
pan_pack_color_32(packed, out.ui[0]);
+ else if (size == 6)
+ pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
else if (size == 8)
pan_pack_color_64(packed, out.ui[0], out.ui[1]);
else if (size == 16)
/* Clearing affects the entire framebuffer (by definition -- this is
* the Gallium clear callback, which clears the whole framebuffer. If
- * the scissor test were enabled from the GL side, the state tracker
+ * the scissor test were enabled from the GL side, the gallium frontend
* would emit a quad instead and we wouldn't go down this code path) */
panfrost_batch_union_scissor(batch, 0, 0,
batch->maxy = MIN2(batch->maxy, maxy);
}
/* Are we currently rendering to the screen (rather than an FBO)? */
bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)