/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

28 #include "drm-uapi/panfrost_drm.h"
31 #include "pan_context.h"
32 #include "util/hash_table.h"
33 #include "util/ralloc.h"
34 #include "util/format/u_format.h"
35 #include "util/u_pack_color.h"
37 #include "pandecode/decode.h"
38 #include "panfrost-quirks.h"
/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any time, that's what the type field
 * encodes.
 * When the last access is a write, the batch writing the BO might have read
 * dependencies (readers that have not been executed yet and want to read the
 * previous BO content), and when the last access is a read, all readers might
 * depend on another batch to push its results to memory. That's what the
 * readers/writers fields keep track of.
 * There can only be one writer at any given time: if a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        uint32_t type;
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
};
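
/* Rough summary of how panfrost_batch_update_bo_access() below turns these
 * fields into batch-level dependencies, when the previous access came from
 * another batch:
 *  - write after read:  depend on every recorded reader, become the writer,
 *                        then release the readers array
 *  - write after write: depend on the previous writer, become the writer
 *  - read after write:  depend on the writer, join the readers array
 *  - read after read:   join the readers array (plus a writer dep, if one
 *                        is still tracked)
 */
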
static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;
        int ret;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        assert(fence);
        pipe_reference_init(&fence->reference, 1);
        fence->ctx = batch->ctx;
        fence->batch = batch;
        ret = drmSyncobjCreate(pan_screen(batch->ctx->base.screen)->fd, 0,
                               &fence->syncobj);
        assert(!ret);

        return fence;
}

static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        drmSyncobjDestroy(pan_screen(fence->ctx->base.screen)->fd,
                          fence->syncobj);
        ralloc_free(fence);
}

void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}

void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}

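/* Fence lifetime sketch: a fence starts with a refcount of 1 when its batch
 * is created, gains a reference for every reader/writer slot that records it
 * in the BO access table, for every batch that lists it as a dependency, and
 * for callers waiting on a flush. It is destroyed -- along with its kernel
 * syncobj -- once the last reference is dropped through
 * panfrost_batch_fence_unreference().
 */
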
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;
        batch->transient_offset = 0;

        util_dynarray_init(&batch->headers, batch);
        util_dynarray_init(&batch->gpu_headers, batch);
        util_dynarray_init(&batch->dependencies, batch);
        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        return batch;
}

static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches. This way, next draws/clears targeting this FBO will
         * trigger the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        /* If this is the bound batch, the panfrost_context parameters are
         * relevant so submitting it invalidates those parameters, but if it's
         * not bound, the context parameters are for some other batch so we
         * can't invalidate them.
         */
        if (ctx->batch == batch) {
                panfrost_invalidate_frame(ctx);
                ctx->batch = NULL;
        }
}

static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}

static void
panfrost_free_batch(struct panfrost_batch *batch)
{
        if (!batch)
                return;

        assert(panfrost_batch_is_frozen(batch));

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of an already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}

static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}

static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        panfrost_freeze_batch(newdep->batch);
}

static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */
        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}

/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we're wallpapering, we special case to work around
         * u_blitter abuse */

        if (ctx->wallpaper_batch)
                return ctx->wallpaper_batch;

        /* If we already began rendering, use that */

        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}

struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->last_job.gpu)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}

static bool
panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
{
        if (fence->signaled)
                return true;

        /* Batch has not been submitted yet. */
        if (fence->batch)
                return false;

        int ret = drmSyncobjWait(pan_screen(fence->ctx->base.screen)->fd,
                                 &fence->syncobj, 1, 0, 0, NULL);

        /* Cache whether the fence was signaled */
        fence->signaled = ret >= 0;
        return fence->signaled;
}

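/* Note: drmSyncobjWait() is called with a timeout of 0 above, so it acts as a
 * non-blocking poll; it should fail with a timeout error while the syncobj is
 * still unsignaled, and a non-negative return is therefore treated as
 * "already signaled" and cached in fence->signaled.
 */
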
static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer && panfrost_batch_fence_is_signaled(access->writer)) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        unsigned nreaders = 0;
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!*reader)
                        continue;

                if (panfrost_batch_fence_is_signaled(*reader)) {
                        panfrost_batch_fence_unreference(*reader);
                        *reader = NULL;
                } else {
                        nreaders++;
                }
        }

        if (!nreaders)
                util_dynarray_clear(&access->readers);
}

/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer)
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
        }
}

static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}

static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, uint32_t access_type,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        uint32_t old_access_type;
        struct hash_entry *entry;

        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_READ);

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_access_type = access->type;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_access_type to our own access type in that case.
                 */
                old_access_type = access_type;
                access->type = access_type;
        }

        assert(access);

        if (access_type == PAN_BO_ACCESS_WRITE &&
            old_access_type == PAN_BO_ACCESS_READ) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourself (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                /* We now are the new writer. */
                access->writer = batch->out_sync;
                access->type = access_type;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;
                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (access_type == PAN_BO_ACCESS_WRITE &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to write this BO.
                 * First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (access_type == PAN_BO_ACCESS_READ &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to read this BO.
                 * First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                        access->type = PAN_BO_ACCESS_READ;
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the reader array.
                 */
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);
                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }
}

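/* A concrete example of the above: if batch A reads BO X (A lands in the
 * readers array) and batch B later writes X, the write-after-read path makes
 * B depend on A's fence, installs B's out_sync as the writer, and releases
 * the reader slots, so the kernel will not start B's jobs before A's have
 * completed.
 */
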
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        assert(entry);

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        /* All dependencies should have been flushed before we execute the
         * wallpaper draw, so it should be harmless to skip the
         * update_bo_access() call.
         */
        if (batch == batch->ctx->wallpaper_batch)
                return;

        /* Only pass R/W flags to the dep tracking logic. */
        assert(flags & PAN_BO_ACCESS_RW);
        flags = (flags & PAN_BO_ACCESS_WRITE) ?
                PAN_BO_ACCESS_WRITE : PAN_BO_ACCESS_READ;
        panfrost_batch_update_bo_access(batch, bo, flags, old_flags != 0);
}

void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_bo(batch, rsrc->bo, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_bo(batch, rsrc->bo, flags);
        }
}

struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = panfrost_bo_create(pan_screen(batch->ctx->base.screen), size,
                                create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * panfrost_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);

        return bo;
}

/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to use allocate/free BO directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->gpu;
}

struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch)
{
        if (batch->scratchpad)
                return batch->scratchpad;

        batch->scratchpad = panfrost_batch_create_bo(batch, 64 * 4 * 4096,
                                                     PAN_BO_INVISIBLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->scratchpad);
        return batch->scratchpad;
}

struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
        if (batch->tiler_heap)
                return batch->tiler_heap;

        batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
                                                     PAN_BO_INVISIBLE |
                                                     PAN_BO_GROWABLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_heap);
        return batch->tiler_heap;
}

struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(screen->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}

static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        /* Color 0 is cleared, no need to draw the wallpaper.
         * TODO: MRT wallpapers.
         */
        if (batch->clear & PIPE_CLEAR_COLOR0)
                return;

        /* Nothing to reload? TODO: MRT wallpapers */
        if (batch->key.cbufs[0] == NULL)
                return;

        /* No draw calls, and no clear on the depth/stencil bufs.
         * Drawing the wallpaper would be useless.
         */
        if (!batch->last_tiler.gpu &&
            !(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
                return;

        /* Check if the buffer has any content on it worth preserving */

        struct pipe_surface *surf = batch->key.cbufs[0];
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        batch->ctx->wallpaper_batch = batch;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        /* FIXME: Looks like aligning on a tile is not enough, but
         * aligning on twice the tile size seems to work. We don't
         * know exactly what happens here but this deserves extra
         * investigation to figure it out.
         */
        batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                           rsrc->base.width0);
        batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                           rsrc->base.height0);

        struct pipe_scissor_state damage;
        struct pipe_box rects[4];

        /* Clamp the damage box to the rendering area. */
        damage.minx = MAX2(batch->minx, rsrc->damage.biggest_rect.x);
        damage.miny = MAX2(batch->miny, rsrc->damage.biggest_rect.y);
        damage.maxx = MIN2(batch->maxx,
                           rsrc->damage.biggest_rect.x +
                           rsrc->damage.biggest_rect.width);
        damage.maxy = MIN2(batch->maxy,
                           rsrc->damage.biggest_rect.y +
                           rsrc->damage.biggest_rect.height);

        /* One damage rectangle means we can end up with at most 4 reload
         * regions:
         * 1: left region, only exists if damage.x > 0
         * 2: right region, only exists if damage.x + damage.width < fb->width
         * 3: top region, only exists if damage.y > 0. The intersection with
         *    the left and right regions are dropped
         * 4: bottom region, only exists if damage.y + damage.height < fb->height.
         *    The intersection with the left and right regions are dropped
         *
         *  ____________________________
         * |       |     3     |      |
         * |       |___________|      |
         * |       |           |      |
         * |   1   |  damage   |  2   |
         * |       |___________|      |
         * |       |     4     |      |
         * |_______|___________|______|
         */
        u_box_2d(batch->minx, batch->miny, damage.minx - batch->minx,
                 batch->maxy - batch->miny, &rects[0]);
        u_box_2d(damage.maxx, batch->miny, batch->maxx - damage.maxx,
                 batch->maxy - batch->miny, &rects[1]);
        u_box_2d(damage.minx, batch->miny, damage.maxx - damage.minx,
                 damage.miny - batch->miny, &rects[2]);
        u_box_2d(damage.minx, damage.maxy, damage.maxx - damage.minx,
                 batch->maxy - damage.maxy, &rects[3]);

        for (unsigned i = 0; i < 4; i++) {
                /* Width and height are always >= 0 even if width is declared as a
                 * signed integer: u_box_2d() helper takes unsigned args and
                 * panfrost_set_damage_region() is taking care of clamping
                 * negative values.
                 */
                if (!rects[i].width || !rects[i].height)
                        continue;

                /* Blit the wallpaper in */
                panfrost_blit_wallpaper(batch->ctx, &rects[i]);
        }
        batch->ctx->wallpaper_batch = NULL;
}

static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs,
                            struct mali_job_descriptor_header *header)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles, *in_syncs = NULL;
        bool is_fragment_shader;
        int ret;

        is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->first_job.gpu;
        if (is_fragment_shader)
                submit.in_sync_count = 1;
        else
                submit.in_sync_count = util_dynarray_num_elements(&batch->dependencies,
                                                                  struct panfrost_batch_fence *);

        if (submit.in_sync_count) {
                in_syncs = calloc(submit.in_sync_count, sizeof(*in_syncs));
                assert(in_syncs);
        }

        /* The fragment job always depends on the vertex/tiler job if there's
         * one.
         */
        if (is_fragment_shader) {
                in_syncs[0] = batch->out_sync->syncobj;
        } else {
                unsigned i = 0;

                util_dynarray_foreach(&batch->dependencies,
                                      struct panfrost_batch_fence *, dep)
                        in_syncs[i++] = (*dep)->syncobj;
        }

        submit.in_syncs = (uintptr_t)in_syncs;
        submit.out_sync = batch->out_sync->syncobj;
        submit.jc = first_job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry) {
                struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
                uint32_t flags = (uintptr_t)entry->data;

                assert(bo->gem_handle > 0);
                bo_handles[submit.bo_handle_count++] = bo->gem_handle;

                /* Update the BO access flags so that panfrost_bo_wait() knows
                 * about all pending accesses.
                 * We only keep the READ/WRITE info since this is all the BO
                 * wait logic cares about.
                 * We also preserve existing flags as this batch might not
                 * be the first one to access the BO.
                 */
                bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
        }

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        free(in_syncs);

        if (ret) {
                fprintf(stderr, "Error submitting: %m\n");
                return errno;
        }

        if (pan_debug & PAN_DBG_SYNC) {
                u32 status;

                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);

                status = header->exception_status;

                if (status && status != 0x1) {
                        fprintf(stderr, "Job %" PRIx64 " failed: source ID: 0x%x access: %s exception: 0x%x (exception_status 0x%x) fault_pointer 0x%" PRIx64 " \n",
                                first_job_desc,
                                (status >> 16) & 0xFFFF,
                                pandecode_exception_access((status >> 8) & 0x3),
                                status & 0xFF,
                                status,
                                header->fault_pointer);
                }
        }

        /* Trace the job if we're doing that */
        if (pan_debug & PAN_DBG_TRACE) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);
                pandecode_jc(submit.jc, FALSE, screen->gpu_id);
        }

        return 0;
}

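/* For reference, the submit ioctl built above hands the kernel three things:
 * the job chain address (jc), the BO handle list (so the kernel can pin the
 * right buffers for the job's lifetime), and the sync objects. The kernel is
 * expected to start the job only once every in_sync has signaled and to
 * signal out_sync when the job completes, which is what the batch dependency
 * tracking in this file relies on.
 */
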
static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch)
{
        bool has_draws = batch->first_job.gpu;
        struct mali_job_descriptor_header *header;
        int ret = 0;

        if (has_draws) {
                header = (struct mali_job_descriptor_header *)batch->first_job.cpu;
                ret = panfrost_batch_submit_ioctl(batch, batch->first_job.gpu, 0, header);
                assert(!ret);
        }

        if (batch->first_tiler.gpu || batch->clear) {
                mali_ptr fragjob = panfrost_fragment_job(batch, has_draws, &header);

                ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS, header);
                assert(!ret);
        }

        return ret;
}

static void
panfrost_batch_submit(struct panfrost_batch *batch)
{
        assert(batch);

        /* Submit the dependencies first. */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->last_job.gpu && !batch->clear) {
                /* Mark the fence as signaled so the fence logic does not try
                 * to wait on it.
                 */
                batch->out_sync->signaled = true;
                goto out;
        }

        panfrost_batch_draw_wallpaper(batch);

        panfrost_scoreboard_link_batch(batch);

        ret = panfrost_batch_submit_jobs(batch);

        if (ret)
                fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                struct panfrost_resource *res;

                if (!batch->key.cbufs[i])
                        continue;

                res = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_resource_reset_damage(res);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}

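/* Summarizing the submission path above: dependencies are flushed
 * recursively first, the wallpaper blit reloads previously rendered FBO
 * content, the scoreboard links the job chain, the vertex/tiler and fragment
 * jobs are pushed to the kernel, the render targets' damage info is reset,
 * and the batch is finally frozen and freed.
 */
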
void
panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait)
{
        struct util_dynarray fences, syncobjs;

        if (wait) {
                util_dynarray_init(&fences, NULL);
                util_dynarray_init(&syncobjs, NULL);
        }

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;

                assert(batch);

                if (wait) {
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences, struct panfrost_batch_fence *,
                                             batch->out_sync);
                        util_dynarray_append(&syncobjs, uint32_t,
                                             batch->out_sync->syncobj);
                }

                panfrost_batch_submit(batch);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);

        if (!wait)
                return;

        drmSyncobjWait(pan_screen(ctx->base.screen)->fd,
                       util_dynarray_begin(&syncobjs),
                       util_dynarray_num_elements(&syncobjs, uint32_t),
                       INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);

        util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                panfrost_batch_fence_unreference(*fence);

        util_dynarray_fini(&fences);
        util_dynarray_fini(&syncobjs);
}

bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    uint32_t access_type)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        /* It doesn't make any sense to flush only the readers. */
        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_RW);

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access_type & PAN_BO_ACCESS_WRITE && access->writer &&
            access->writer->batch)
                panfrost_batch_submit(access->writer->batch);

        if (!(access_type & PAN_BO_ACCESS_READ))
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch);
        }
}

void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer && ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->depth.writemask)
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
}

/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}

static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc)) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) <<  8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) <<  0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
                unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
                unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* We scale the components against 0xF0 (=240.0), rather than 0xFF */
                unsigned r4 = CLAMP(color->f[0], 0.0, 1.0) * 240.0;
                unsigned g4 = CLAMP(color->f[1], 0.0, 1.0) * 240.0;
                unsigned b4 = CLAMP(color->f[2], 0.0, 1.0) * 240.0;
                unsigned a4 = CLAMP(clear_alpha, 0.0, 1.0) * 240.0;

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 24) | (b4 << 16) | (g4 << 8) | r4);
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = round(CLAMP(color->f[0], 0.0, 1.0) * 31.0);
                unsigned g5 = round(CLAMP(color->f[1], 0.0, 1.0) * 31.0);
                unsigned b5 = round(CLAMP(color->f[2], 0.0, 1.0) * 31.0);
                unsigned a1 = round(CLAMP(clear_alpha, 0.0, 1.0) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Try Gallium's generic default path. Doesn't work for all
                 * formats but it's a good guess. */

                union util_color out;

                if (util_format_is_pure_integer(format)) {
                        memcpy(out.ui, color->ui, 16);
                } else {
                        util_pack_color(color->f, format, &out);
                }

                unsigned size = util_format_get_blocksize(format);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}

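/* Worked example for the RGBA8 path above: clearing to opaque red
 * (r=1.0, g=0.0, b=0.0, a=1.0) packs as
 *   (0xff << 24) | (0x00 << 16) | (0x00 << 8) | 0xff = 0xff0000ff,
 * and pan_pack_color_32() then replicates that word across all four 32-bit
 * lanes of the 128-bit clear color.
 */
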
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the state tracker
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}

static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}

/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}

/* Are we currently rendering to the screen (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're too early that no framebuffer was sent, it's scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}

void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}