/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

28 #include "drm-uapi/panfrost_drm.h"
31 #include "pan_context.h"
32 #include "util/hash_table.h"
33 #include "util/ralloc.h"
34 #include "util/format/u_format.h"
35 #include "util/u_pack_color.h"
36 #include "util/rounding.h"
38 #include "pan_blending.h"
40 #include "panfrost-quirks.h"
/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any time (writer != NULL means a
 * write is the last access). When the last access is a write, the batch
 * writing the BO might have read dependencies (readers that have not been
 * executed yet and want to read the previous BO content), and when the last
 * access is a read, all readers might depend on another batch to push its
 * results to memory. That's what the readers/writers fields keep track of.
 * There can only be one writer at any given time; if a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
};

static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        pipe_reference_init(&fence->reference, 1);
        fence->batch = batch;

        return fence;
}

static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        ralloc_free(fence);
}

void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}

void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}

static void
panfrost_batch_add_fbo_bos(struct panfrost_batch *batch);

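/* Batches are created lazily, one per framebuffer state: allocate the batch,
 * its BO table and transient pools, create the out_sync fence other batches
 * can depend on, and pre-register the FBO BOs. */
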
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
        struct panfrost_device *dev = pan_device(ctx->base.screen);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;

        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        /* Preallocate the main pool, since every batch has at least one job
         * structure so it will be used */
        batch->pool = panfrost_create_pool(batch, dev, 0, true);

        /* Don't preallocate the invisible pool, since not every batch will use
         * the pre-allocation, particularly if the varyings are larger than the
         * preallocation and a reallocation is needed after anyway. */
        batch->invisible_pool =
                panfrost_create_pool(batch, dev, PAN_BO_INVISIBLE, false);

        panfrost_batch_add_fbo_bos(batch);

        return batch;
}

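/* Freezing a batch detaches it from the context (the FBO -> batch map and
 * ctx->batch), so subsequent draws targeting the same FBO get a fresh batch
 * while the frozen one can still be submitted. */
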
static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches and drop the context reference. This way, next draws/clears
         * targeting this FBO will trigger the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        if (ctx->batch == batch)
                ctx->batch = NULL;
}

#ifdef PAN_BATCH_DEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}
#endif

static void
panfrost_free_batch(struct panfrost_batch *batch)
{
#ifdef PAN_BATCH_DEBUG
        assert(panfrost_batch_is_frozen(batch));
#endif

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        hash_table_foreach(batch->invisible_pool.bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}

#ifdef PAN_BATCH_DEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}
#endif

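/* Adds a dependency from batch on the batch signalling newdep: the fence is
 * referenced, stored in batch->dependencies, and the dependee is frozen so
 * new draws/clears on its FBO start a new batch. */
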
static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

#ifdef PAN_BATCH_DEBUG
        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
#endif

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        if (newdep->batch)
                panfrost_freeze_batch(newdep->batch);
}

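/* Returns the batch associated with the given framebuffer state, creating and
 * caching a new one in ctx->batches if none exists yet. */
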
static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */
        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}

/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we're wallpapering, we special case and return the wallpaper
         * batch */

        if (ctx->wallpaper_batch)
                return ctx->wallpaper_batch;

        /* If we already began rendering, use that */

        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}

struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->scoreboard.first_job)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}

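/* Drops the fences recorded for one BO access entry once they are no longer
 * needed, so the accessed_bos map does not keep stale references around. */
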
static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
        struct panfrost_batch_fence **new_readers = readers_array;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!(*reader))
                        continue;

                panfrost_batch_fence_unreference(*reader);
                *reader = NULL;
        }

        if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
                                  new_readers - readers_array) &&
            new_readers != readers_array)
                unreachable("Invalid dynarray access->readers");
}

/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer) {
                        ralloc_free(access);
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
                }
        }
}

#ifdef PAN_BATCH_DEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}
#endif

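/* Updates the reader/writer tracking for a BO. Depending on the previous
 * access type and the requested one, this adds batch-level dependencies on
 * the previous writer and/or readers and records this batch's out_sync fence
 * as the new writer or as an additional reader. */
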
static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, bool writes,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        bool old_writes = false;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_writes = access->writer != NULL;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_writes to our own access type in that case.
                 */
                old_writes = writes;
        }

        assert(access);

        if (writes && !old_writes) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourself (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                if (access->writer)
                        panfrost_batch_fence_unreference(access->writer);

                /* We now are the new writer. */
                access->writer = batch->out_sync;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;

                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (!writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                        access->writer = NULL;
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the reader array.
                 */
#ifdef PAN_BATCH_DEBUG
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }
#endif

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);

                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }
}

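/* Registers a BO with the batch: takes a reference the first time the BO is
 * seen, merges the access flags, and, for shared BOs, updates the dependency
 * tracking accordingly. */
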
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        /* All dependencies should have been flushed before we execute the
         * wallpaper draw, so it should be harmless to skip the
         * update_bo_access() call.
         */
        if (batch == batch->ctx->wallpaper_batch)
                return;

        assert(flags & PAN_BO_ACCESS_RW);
        panfrost_batch_update_bo_access(batch, bo, flags & PAN_BO_ACCESS_WRITE,
                                        old_flags != 0);
}

static void
panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
                                struct panfrost_resource *rsrc,
                                uint32_t flags)
{
        panfrost_batch_add_bo(batch, rsrc->bo, flags);

        for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
                if (rsrc->slices[i].checksum_bo)
                        panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);

        if (rsrc->separate_stencil)
                panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
}

static void
panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }
}

struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = panfrost_bo_create(pan_device(batch->ctx->base.screen), size,
                                create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * panfrost_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);

        return bo;
}

/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to use allocate/free BO directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */
                size = util_next_power_of_two(size);

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->gpu;
}

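/* Lazily allocates the per-batch scratchpad (thread local storage), sized
 * from the shader stack requirements, and reuses it for the batch lifetime. */
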
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
                              unsigned shift,
                              unsigned thread_tls_alloc,
                              unsigned core_count)
{
        unsigned size = panfrost_get_total_stack_size(shift,
                                                      thread_tls_alloc,
                                                      core_count);

        if (batch->scratchpad) {
                assert(batch->scratchpad->size >= size);
        } else {
                batch->scratchpad = panfrost_batch_create_bo(batch, size,
                                                             PAN_BO_INVISIBLE,
                                                             PAN_BO_ACCESS_PRIVATE |
                                                             PAN_BO_ACCESS_RW |
                                                             PAN_BO_ACCESS_VERTEX_TILER |
                                                             PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->scratchpad;
}

struct panfrost_bo *
panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
                                 unsigned size,
                                 unsigned workgroup_count)
{
        if (batch->shared_memory) {
                assert(batch->shared_memory->size >= size);
        } else {
                batch->shared_memory = panfrost_batch_create_bo(batch, size,
                                                                PAN_BO_INVISIBLE,
                                                                PAN_BO_ACCESS_PRIVATE |
                                                                PAN_BO_ACCESS_RW |
                                                                PAN_BO_ACCESS_VERTEX_TILER);
        }

        return batch->shared_memory;
}

struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
        if (batch->tiler_heap)
                return batch->tiler_heap;

        batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
                                                     PAN_BO_INVISIBLE |
                                                     PAN_BO_GROWABLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_heap);
        return batch->tiler_heap;
}

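/* Uploads the Bifrost tiler descriptors (heap meta + tiler meta) for this
 * batch, pointing them at the lazily created tiler heap BO. */
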
mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
{
        if (!vertex_count)
                return 0;

        if (batch->tiler_meta)
                return batch->tiler_meta;

        struct panfrost_bo *tiler_heap;
        tiler_heap = panfrost_batch_get_tiler_heap(batch);

        struct bifrost_tiler_heap_meta tiler_heap_meta = {
                .heap_size = tiler_heap->size,
                .tiler_heap_start = tiler_heap->gpu,
                .tiler_heap_free = tiler_heap->gpu,
                .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
                .unk7e007e = 0x7e007e,
        };

        struct bifrost_tiler_meta tiler_meta = {
                .hierarchy_mask = 0x28,
                .width = MALI_POSITIVE(batch->key.width),
                .height = MALI_POSITIVE(batch->key.height),
                .tiler_heap_meta = panfrost_pool_upload(&batch->pool, &tiler_heap_meta, sizeof(tiler_heap_meta)),
        };

        batch->tiler_meta = panfrost_pool_upload(&batch->pool, &tiler_meta, sizeof(tiler_meta));
        return batch->tiler_meta;
}

struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}

mali_ptr
panfrost_batch_reserve_framebuffer(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        /* If we haven't, reserve space for the framebuffer */

        if (!batch->framebuffer.gpu) {
                unsigned size = (dev->quirks & MIDGARD_SFBD) ?
                                sizeof(struct mali_single_framebuffer) :
                                sizeof(struct mali_framebuffer);

                batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);

                /* Tag the pointer */
                if (!(dev->quirks & MIDGARD_SFBD))
                        batch->framebuffer.gpu |= MALI_MFBD;
        }

        return batch->framebuffer.gpu;
}

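/* Reloads the existing contents of a render target into the batch, clamped to
 * the accumulated damage region, so a partial update does not lose the pixels
 * that were not redrawn. */
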
static void
panfrost_load_surface(struct panfrost_batch *batch, struct pipe_surface *surf, unsigned loc)
{
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        if (!rsrc->damage.inverted_len)
                return;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        /* XXX: Native blits on Bifrost */
        if (batch->pool.dev->quirks & IS_BIFROST) {
                if (loc != FRAG_RESULT_DATA0)
                        return;

                /* XXX: why align on *twice* the tile length? */
                batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
                batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
                batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                                   rsrc->base.width0);
                batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                                   rsrc->base.height0);

                struct pipe_box rect;
                batch->ctx->wallpaper_batch = batch;
                u_box_2d(batch->minx, batch->miny, batch->maxx - batch->minx,
                         batch->maxy - batch->miny, &rect);
                panfrost_blit_wallpaper(batch->ctx, &rect);
                batch->ctx->wallpaper_batch = NULL;
                return;
        }

        enum pipe_format format = rsrc->base.format;

        if (loc == FRAG_RESULT_DEPTH) {
                if (!util_format_has_depth(util_format_description(format)))
                        return;

                format = util_format_get_depth_only(format);
        } else if (loc == FRAG_RESULT_STENCIL) {
                if (!util_format_has_stencil(util_format_description(format)))
                        return;

                if (rsrc->separate_stencil) {
                        rsrc = rsrc->separate_stencil;
                        format = rsrc->base.format;
                }

                format = util_format_stencil_only(format);
        }

        enum mali_texture_dimension dim =
                panfrost_translate_texture_dimension(rsrc->base.target);

        struct pan_image img = {
                .width0 = rsrc->base.width0,
                .height0 = rsrc->base.height0,
                .depth0 = rsrc->base.depth0,
                .format = format,
                .dim = dim,
                .modifier = rsrc->modifier,
                .array_size = rsrc->base.array_size,
                .first_level = level,
                .last_level = level,
                .first_layer = surf->u.tex.first_layer,
                .last_layer = surf->u.tex.last_layer,
                .nr_samples = rsrc->base.nr_samples,
                .cubemap_stride = rsrc->cubemap_stride,
                .bo = rsrc->bo,
                .slices = rsrc->slices
        };

= 0;
873 if (loc
>= FRAG_RESULT_DATA0
&& !panfrost_can_fixed_blend(rsrc
->base
.format
)) {
874 struct panfrost_blend_shader
*b
=
875 panfrost_get_blend_shader(batch
->ctx
, &batch
->ctx
->blit_blend
, rsrc
->base
.format
, loc
- FRAG_RESULT_DATA0
);
877 struct panfrost_bo
*bo
= panfrost_batch_create_bo(batch
, b
->size
,
879 PAN_BO_ACCESS_PRIVATE
|
881 PAN_BO_ACCESS_FRAGMENT
);
883 memcpy(bo
->cpu
, b
->buffer
, b
->size
);
884 assert(b
->work_count
<= 4);
886 blend_shader
= bo
->gpu
| b
->first_tag
;
889 struct panfrost_transfer transfer
= panfrost_pool_alloc(&batch
->pool
,
890 4 * 4 * 6 * rsrc
->damage
.inverted_len
);
892 for (unsigned i
= 0; i
< rsrc
->damage
.inverted_len
; ++i
) {
893 float *o
= (float *) (transfer
.cpu
+ (4 * 4 * 6 * i
));
894 struct pan_rect r
= rsrc
->damage
.inverted_rects
[i
];
897 r
.minx
, rsrc
->base
.height0
- r
.miny
, 0.0, 1.0,
898 r
.maxx
, rsrc
->base
.height0
- r
.miny
, 0.0, 1.0,
899 r
.minx
, rsrc
->base
.height0
- r
.maxy
, 0.0, 1.0,
901 r
.maxx
, rsrc
->base
.height0
- r
.miny
, 0.0, 1.0,
902 r
.minx
, rsrc
->base
.height0
- r
.maxy
, 0.0, 1.0,
903 r
.maxx
, rsrc
->base
.height0
- r
.maxy
, 0.0, 1.0,
906 assert(sizeof(rect
) == 4 * 4 * 6);
907 memcpy(o
, rect
, sizeof(rect
));
910 panfrost_load_midg(&batch
->pool
, &batch
->scoreboard
,
912 batch
->framebuffer
.gpu
, transfer
.gpu
,
913 rsrc
->damage
.inverted_len
* 6,
916 panfrost_batch_add_bo(batch
, batch
->pool
.dev
->blit_shaders
.bo
,
917 PAN_BO_ACCESS_SHARED
| PAN_BO_ACCESS_READ
| PAN_BO_ACCESS_FRAGMENT
);
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        panfrost_batch_reserve_framebuffer(batch);

        /* Assume combined. If either depth or stencil is written, they will
         * both be written so we need to be careful for reloading */

        unsigned draws = batch->draws;

        if (draws & PIPE_CLEAR_DEPTHSTENCIL)
                draws |= PIPE_CLEAR_DEPTHSTENCIL;

        /* Mask of buffers which need reload since they are not cleared and
         * they are drawn. (If they are cleared, reload is useless; if they are
         * not drawn and also not cleared, we can generally omit the attachment
         * at the framebuffer descriptor level) */

        unsigned reload = ~batch->clear & draws;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                if (reload & (PIPE_CLEAR_COLOR0 << i))
                        panfrost_load_surface(batch, batch->key.cbufs[i], FRAG_RESULT_DATA0 + i);
        }

        if (reload & PIPE_CLEAR_DEPTH)
                panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_DEPTH);

        if (reload & PIPE_CLEAR_STENCIL)
                panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_STENCIL);
}

static void
panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
        uint32_t flags = (uintptr_t)entry->data;

        assert(bo->gem_handle > 0);
        bo_handles[idx] = bo->gem_handle;

        /* Update the BO access flags so that panfrost_bo_wait() knows
         * about all pending accesses.
         * We only keep the READ/WRITE info since this is all the BO
         * wait logic cares about.
         * We also preserve existing flags as this batch might not
         * be the first one to access the BO.
         */
        bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
}

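/* Builds and issues the DRM_IOCTL_PANFROST_SUBMIT ioctl for one job chain,
 * passing the handles of every BO referenced by the batch and its pools. */
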
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs,
                            uint32_t out_sync)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles;
        int ret;

        /* If we trace, we always need a syncobj, so make one of our own if we
         * weren't given one to use. Remember that we did so, so we can free it
         * after we're done but preventing double-frees if we were given a
         * syncobj */

        bool our_sync = false;

        if (!out_sync && dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
                drmSyncobjCreate(dev->fd, 0, &out_sync);
                our_sync = true;
        }

        submit.out_sync = out_sync;
        submit.jc = first_job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        hash_table_foreach(batch->invisible_pool.bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);

        if (ret) {
                if (dev->debug & PAN_DBG_MSGS)
                        fprintf(stderr, "Error submitting: %m\n");

                return errno;
        }

        /* Trace the job if we're doing that */
        if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(dev->fd, &out_sync, 1,
                               INT64_MAX, 0, NULL);

                /* Trace gets priority over sync */
                bool minimal = !(dev->debug & PAN_DBG_TRACE);
                pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
        }

        /* Cleanup if we created the syncobj */
        if (our_sync)
                drmSyncobjDestroy(dev->fd, out_sync);

        return 0;
}

/* Submit both vertex/tiler and fragment jobs for a batch, possibly with an
 * outsync corresponding to the later of the two (since there will be an
 * implicit dep between them) */

static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch, uint32_t out_sync)
{
        bool has_draws = batch->scoreboard.first_job;
        bool has_frag = batch->scoreboard.tiler_dep || batch->clear;
        int ret = 0;

        if (has_draws) {
                ret = panfrost_batch_submit_ioctl(batch, batch->scoreboard.first_job,
                                                  0, has_frag ? 0 : out_sync);
                assert(!ret);
        }

        if (has_frag) {
                /* Whether we program the fragment job for draws or not depends
                 * on whether there is any *tiler* activity (so fragment
                 * shaders). If there are draws but entirely RASTERIZER_DISCARD
                 * (say, for transform feedback), we want a fragment job that
                 * *only* clears, since otherwise the tiler structures will be
                 * uninitialized leading to faults (or state leaks) */

                mali_ptr fragjob = panfrost_fragment_job(batch,
                                batch->scoreboard.tiler_dep != 0);
                ret = panfrost_batch_submit_ioctl(batch, fragjob,
                                PANFROST_JD_REQ_FS, out_sync);
                assert(!ret);
        }

        return ret;
}

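/* Submits a batch: its dependencies are submitted first, then the wallpaper
 * (FB reload) is drawn, the framebuffer descriptor is attached, the
 * vertex/tiler and fragment jobs are submitted, and the batch is freed. */
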
static void
panfrost_batch_submit(struct panfrost_batch *batch, uint32_t out_sync)
{
        assert(batch);
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        /* Submit the dependencies first. Don't pass along the out_sync since
         * they are guaranteed to terminate sooner */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch, 0);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->scoreboard.first_job && !batch->clear) {
                if (out_sync)
                        drmSyncobjSignal(dev->fd, &out_sync, 1);
                goto out;
        }

        panfrost_batch_draw_wallpaper(batch);

        /* Now that all draws are in, we can finally prepare the
         * FBD for the batch */

        if (batch->framebuffer.gpu && batch->scoreboard.first_job) {
                struct panfrost_context *ctx = batch->ctx;
                struct pipe_context *gallium = (struct pipe_context *) ctx;
                struct panfrost_device *dev = pan_device(gallium->screen);

                if (dev->quirks & MIDGARD_SFBD)
                        panfrost_attach_sfbd(batch, ~0);
                else
                        panfrost_attach_mfbd(batch, ~0);
        }

        mali_ptr polygon_list = panfrost_batch_get_polygon_list(batch,
                MALI_TILER_MINIMUM_HEADER_SIZE);

        panfrost_scoreboard_initialize_tiler(&batch->pool, &batch->scoreboard, polygon_list);

        ret = panfrost_batch_submit_jobs(batch, out_sync);

        if (ret && dev->debug & PAN_DBG_MSGS)
                fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                if (!batch->key.cbufs[i])
                        continue;

                panfrost_resource_set_damage_region(NULL,
                                batch->key.cbufs[i]->texture, 0, NULL);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}

/* Submit all batches, applying the out_sync to the currently bound batch */

void
panfrost_flush_all_batches(struct panfrost_context *ctx, uint32_t out_sync)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        panfrost_batch_submit(batch, out_sync);

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;
                assert(batch);

                panfrost_batch_submit(batch, 0);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);
}

bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}

/* We always flush writers. We might also need to flush readers */

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    bool flush_readers)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access->writer && access->writer->batch)
                panfrost_batch_submit(access->writer->batch, 0);

        if (!flush_readers)
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch, 0);
        }
}

void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->base.depth.writemask) {
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
                batch->draws |= PIPE_CLEAR_DEPTH;
        }

        if (ctx->depth_stencil && ctx->depth_stencil->base.stencil[0].enabled)
                batch->draws |= PIPE_CLEAR_STENCIL;
}

void
panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_state *ss;

                ss = panfrost_get_shader_state(ctx, i);
                if (!ss)
                        continue;

                batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
        }
}

/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}

static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc) && desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) <<  8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) <<  0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* Convert to 4-bits */
                unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
                unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
                unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
                unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
                unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Otherwise, it's generic subject to replication */

                union util_color out = { 0 };
                unsigned size = util_format_get_blocksize(format);

                util_pack_color(color->f, format, &out);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 6)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}

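/* Records a clear on the batch: packs the clear colour for each affected
 * render target, stores the depth/stencil clear values, and grows the batch
 * scissor to cover the whole framebuffer. */
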
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the gallium frontend
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}

static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}

/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}

/* Are we currently rendering to the dev (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're too early that no framebuffer was sent, it's scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}

void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}