panfrost: Pre-allocate memory for pool
[mesa.git] / src / gallium / drivers / panfrost / pan_job.c
1 /*
2 * Copyright (C) 2019 Alyssa Rosenzweig
3 * Copyright (C) 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 */
25
26 #include <assert.h>
27
28 #include "drm-uapi/panfrost_drm.h"
29
30 #include "pan_bo.h"
31 #include "pan_context.h"
32 #include "util/hash_table.h"
33 #include "util/ralloc.h"
34 #include "util/format/u_format.h"
35 #include "util/u_pack_color.h"
36 #include "util/rounding.h"
37 #include "pan_util.h"
38 #include "pan_blending.h"
39 #include "decode.h"
40 #include "panfrost-quirks.h"
41
42 /* panfrost_bo_access is here to help us keep track of batch accesses to BOs
43 * and build a proper dependency graph such that batches can be pipelined for
44 * better GPU utilization.
45 *
46 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
47 * A BO is either being written or read at any time (see if writer != NULL).
48 * When the last access is a write, the batch writing the BO might have read
49 * dependencies (readers that have not been executed yet and want to read the
50 * previous BO content), and when the last access is a read, all readers might
 51 * depend on another batch to push its results to memory. That's what the
 52 * readers/writer fields keep track of.
 53 * There can only be one writer at any given time; if a new batch wants to
54 * write to the same BO, a dependency will be added between the new writer and
55 * the old writer (at the batch level), and panfrost_bo_access->writer will be
56 * updated to point to the new writer.
57 */
58 struct panfrost_bo_access {
59 struct util_dynarray readers;
60 struct panfrost_batch_fence *writer;
61 };
62
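/* Allocate a fence for a batch. The fence starts with one reference (held by
 * the batch) and points back at the batch so that waiters can tell whether it
 * has been submitted yet. */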
63 static struct panfrost_batch_fence *
64 panfrost_create_batch_fence(struct panfrost_batch *batch)
65 {
66 struct panfrost_batch_fence *fence;
67
68 fence = rzalloc(NULL, struct panfrost_batch_fence);
69 assert(fence);
70 pipe_reference_init(&fence->reference, 1);
71 fence->batch = batch;
72
73 return fence;
74 }
75
76 static void
77 panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
78 {
79 ralloc_free(fence);
80 }
81
82 void
83 panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
84 {
85 if (pipe_reference(&fence->reference, NULL))
86 panfrost_free_batch_fence(fence);
87 }
88
89 void
90 panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
91 {
92 pipe_reference(NULL, &fence->reference);
93 }
94
95 static void
96 panfrost_batch_add_fbo_bos(struct panfrost_batch *batch);
97
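/* Create a new batch keyed on the given framebuffer state: set up the BO
 * table, reset the scissor bounds, allocate the out_sync fence and the
 * transient memory pool, and add the FBO BOs right away. */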
98 static struct panfrost_batch *
99 panfrost_create_batch(struct panfrost_context *ctx,
100 const struct pipe_framebuffer_state *key)
101 {
102 struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
103
104 batch->ctx = ctx;
105
106 batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
107 _mesa_key_pointer_equal);
108
109 batch->minx = batch->miny = ~0;
110 batch->maxx = batch->maxy = 0;
111
112 batch->out_sync = panfrost_create_batch_fence(batch);
113 util_copy_framebuffer_state(&batch->key, key);
114
115 batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen), 0, true);
116
117 panfrost_batch_add_fbo_bos(batch);
118
119 return batch;
120 }
121
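/* Detach a batch from the context so that no new draws/clears can target it:
 * subsequent work on the same FBO will get a fresh batch instead. */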
122 static void
123 panfrost_freeze_batch(struct panfrost_batch *batch)
124 {
125 struct panfrost_context *ctx = batch->ctx;
126 struct hash_entry *entry;
127
128 /* Remove the entry in the FBO -> batch hash table if the batch
129 * matches and drop the context reference. This way, next draws/clears
130 * targeting this FBO will trigger the creation of a new batch.
131 */
132 entry = _mesa_hash_table_search(ctx->batches, &batch->key);
133 if (entry && entry->data == batch)
134 _mesa_hash_table_remove(ctx->batches, entry);
135
136 if (ctx->batch == batch)
137 ctx->batch = NULL;
138 }
139
140 #ifdef PAN_BATCH_DEBUG
141 static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
142 {
143 struct panfrost_context *ctx = batch->ctx;
144 struct hash_entry *entry;
145
146 entry = _mesa_hash_table_search(ctx->batches, &batch->key);
147 if (entry && entry->data == batch)
148 return false;
149
150 if (ctx->batch == batch)
151 return false;
152
153 return true;
154 }
155 #endif
156
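/* Release everything a batch owns: the BO references (both the explicit BO
 * table and the pool's BOs), the dependency fences, the out_sync fence and
 * the framebuffer state, and finally the batch itself. */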
157 static void
158 panfrost_free_batch(struct panfrost_batch *batch)
159 {
160 if (!batch)
161 return;
162
163 #ifdef PAN_BATCH_DEBUG
164 assert(panfrost_batch_is_frozen(batch));
165 #endif
166
167 hash_table_foreach(batch->bos, entry)
168 panfrost_bo_unreference((struct panfrost_bo *)entry->key);
169
170 hash_table_foreach(batch->pool.bos, entry)
171 panfrost_bo_unreference((struct panfrost_bo *)entry->key);
172
173 util_dynarray_foreach(&batch->dependencies,
174 struct panfrost_batch_fence *, dep) {
175 panfrost_batch_fence_unreference(*dep);
176 }
177
 178 /* The out_sync fence lifetime is different from the batch one
 179 * since other batches might want to wait on the fence of an already
 180 * submitted/signaled batch. All we need to do here is make sure the
181 * fence does not point to an invalid batch, which the core will
182 * interpret as 'batch is already submitted'.
183 */
184 batch->out_sync->batch = NULL;
185 panfrost_batch_fence_unreference(batch->out_sync);
186
187 util_unreference_framebuffer_state(&batch->key);
188 ralloc_free(batch);
189 }
190
191 #ifdef PAN_BATCH_DEBUG
192 static bool
193 panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
194 struct panfrost_batch *batch)
195 {
196 if (!root)
197 return false;
198
199 util_dynarray_foreach(&root->dependencies,
200 struct panfrost_batch_fence *, dep) {
201 if ((*dep)->batch == batch ||
202 panfrost_dep_graph_contains_batch((*dep)->batch, batch))
203 return true;
204 }
205
206 return false;
207 }
208 #endif
209
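/* Make 'batch' depend on 'newdep'. Self-dependencies and duplicates are
 * ignored, and the batch backing the new dependency is frozen so that later
 * draws targeting its FBO go to a new batch rather than extending it. */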
210 static void
211 panfrost_batch_add_dep(struct panfrost_batch *batch,
212 struct panfrost_batch_fence *newdep)
213 {
214 if (batch == newdep->batch)
215 return;
216
217 /* We might want to turn ->dependencies into a set if the number of
218 * deps turns out to be big enough to make this 'is dep already there'
219 * search inefficient.
220 */
221 util_dynarray_foreach(&batch->dependencies,
222 struct panfrost_batch_fence *, dep) {
223 if (*dep == newdep)
224 return;
225 }
226
227 #ifdef PAN_BATCH_DEBUG
228 /* Make sure the dependency graph is acyclic. */
229 assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
230 #endif
231
232 panfrost_batch_fence_reference(newdep);
233 util_dynarray_append(&batch->dependencies,
234 struct panfrost_batch_fence *, newdep);
235
236 /* We now have a batch depending on us, let's make sure new draw/clear
237 * calls targeting the same FBO use a new batch object.
238 */
239 if (newdep->batch)
240 panfrost_freeze_batch(newdep->batch);
241 }
242
243 static struct panfrost_batch *
244 panfrost_get_batch(struct panfrost_context *ctx,
245 const struct pipe_framebuffer_state *key)
246 {
247 /* Lookup the job first */
248 struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);
249
250 if (entry)
251 return entry->data;
252
253 /* Otherwise, let's create a job */
254
255 struct panfrost_batch *batch = panfrost_create_batch(ctx, key);
256
257 /* Save the created job */
258 _mesa_hash_table_insert(ctx->batches, &batch->key, batch);
259
260 return batch;
261 }
262
263 /* Get the job corresponding to the FBO we're currently rendering into */
264
265 struct panfrost_batch *
266 panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
267 {
 268 /* If we're wallpapering, we special-case to work around
269 * u_blitter abuse */
270
271 if (ctx->wallpaper_batch)
272 return ctx->wallpaper_batch;
273
274 /* If we already began rendering, use that */
275
276 if (ctx->batch) {
277 assert(util_framebuffer_state_equal(&ctx->batch->key,
278 &ctx->pipe_framebuffer));
279 return ctx->batch;
280 }
281
282 /* If not, look up the job */
283 struct panfrost_batch *batch = panfrost_get_batch(ctx,
284 &ctx->pipe_framebuffer);
285
286 /* Set this job as the current FBO job. Will be reset when updating the
287 * FB state and when submitting or releasing a job.
288 */
289 ctx->batch = batch;
290 return batch;
291 }
292
293 struct panfrost_batch *
294 panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
295 {
296 struct panfrost_batch *batch;
297
298 batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
299
300 /* The batch has no draw/clear queued, let's return it directly.
301 * Note that it's perfectly fine to re-use a batch with an
302 * existing clear, we'll just update it with the new clear request.
303 */
304 if (!batch->scoreboard.first_job)
305 return batch;
306
307 /* Otherwise, we need to freeze the existing one and instantiate a new
308 * one.
309 */
310 panfrost_freeze_batch(batch);
311 return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
312 }
313
314 static void
315 panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
316 struct panfrost_bo_access *access,
317 const struct panfrost_bo *bo)
318 {
319 if (access->writer) {
320 panfrost_batch_fence_unreference(access->writer);
321 access->writer = NULL;
322 }
323
324 struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
325 struct panfrost_batch_fence **new_readers = readers_array;
326
327 util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
328 reader) {
329 if (!(*reader))
330 continue;
331
332 panfrost_batch_fence_unreference(*reader);
333 *reader = NULL;
334 }
335
336 if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
337 new_readers - readers_array) &&
338 new_readers != readers_array)
339 unreachable("Invalid dynarray access->readers");
340 }
341
342 /* Collect signaled fences to keep the kernel-side syncobj-map small. The
343 * idea is to collect those signaled fences at the end of each flush_all
 344 * call. This function is likely to collect only fences from previous
 345 * batch flushes, not the ones that have just been submitted and
346 * are probably still in flight when we trigger the garbage collection.
347 * Anyway, we need to do this garbage collection at some point if we don't
348 * want the BO access map to keep invalid entries around and retain
349 * syncobjs forever.
350 */
351 static void
352 panfrost_gc_fences(struct panfrost_context *ctx)
353 {
354 hash_table_foreach(ctx->accessed_bos, entry) {
355 struct panfrost_bo_access *access = entry->data;
356
357 assert(access);
358 panfrost_bo_access_gc_fences(ctx, access, entry->key);
359 if (!util_dynarray_num_elements(&access->readers,
360 struct panfrost_batch_fence *) &&
361 !access->writer) {
362 ralloc_free(access);
363 _mesa_hash_table_remove(ctx->accessed_bos, entry);
364 }
365 }
366 }
367
368 #ifdef PAN_BATCH_DEBUG
369 static bool
370 panfrost_batch_in_readers(struct panfrost_batch *batch,
371 struct panfrost_bo_access *access)
372 {
373 util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
374 reader) {
375 if (*reader && (*reader)->batch == batch)
376 return true;
377 }
378
379 return false;
380 }
381 #endif
382
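/* Update the reader/writer tracking of a BO for a new access by 'batch'.
 * Depending on the previous and new access types, this adds batch-level
 * dependencies (read-after-write, write-after-read, write-after-write) and
 * installs our out_sync fence as the writer or appends it to the readers. */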
383 static void
384 panfrost_batch_update_bo_access(struct panfrost_batch *batch,
385 struct panfrost_bo *bo, bool writes,
386 bool already_accessed)
387 {
388 struct panfrost_context *ctx = batch->ctx;
389 struct panfrost_bo_access *access;
390 bool old_writes = false;
391 struct hash_entry *entry;
392
393 entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
394 access = entry ? entry->data : NULL;
395 if (access) {
396 old_writes = access->writer != NULL;
397 } else {
398 access = rzalloc(ctx, struct panfrost_bo_access);
399 util_dynarray_init(&access->readers, access);
400 _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
401 /* We are the first to access this BO, let's initialize
402 * old_writes to our own access type in that case.
403 */
404 old_writes = writes;
405 }
406
407 assert(access);
408
409 if (writes && !old_writes) {
410 /* Previous access was a read and we want to write this BO.
411 * We first need to add explicit deps between our batch and
412 * the previous readers.
413 */
414 util_dynarray_foreach(&access->readers,
415 struct panfrost_batch_fence *, reader) {
416 /* We were already reading the BO, no need to add a dep
417 * on ourself (the acyclic check would complain about
418 * that).
419 */
420 if (!(*reader) || (*reader)->batch == batch)
421 continue;
422
423 panfrost_batch_add_dep(batch, *reader);
424 }
425 panfrost_batch_fence_reference(batch->out_sync);
426
427 if (access->writer)
428 panfrost_batch_fence_unreference(access->writer);
429
430 /* We now are the new writer. */
431 access->writer = batch->out_sync;
432
433 /* Release the previous readers and reset the readers array. */
434 util_dynarray_foreach(&access->readers,
435 struct panfrost_batch_fence *,
436 reader) {
437 if (!*reader)
438 continue;
439 panfrost_batch_fence_unreference(*reader);
440 }
441
442 util_dynarray_clear(&access->readers);
443 } else if (writes && old_writes) {
444 /* First check if we were the previous writer, in that case
445 * there's nothing to do. Otherwise we need to add a
446 * dependency between the new writer and the old one.
447 */
448 if (access->writer != batch->out_sync) {
449 if (access->writer) {
450 panfrost_batch_add_dep(batch, access->writer);
451 panfrost_batch_fence_unreference(access->writer);
452 }
453 panfrost_batch_fence_reference(batch->out_sync);
454 access->writer = batch->out_sync;
455 }
456 } else if (!writes && old_writes) {
457 /* First check if we were the previous writer, in that case
458 * we want to keep the access type unchanged, as a write is
459 * more constraining than a read.
460 */
461 if (access->writer != batch->out_sync) {
462 /* Add a dependency on the previous writer. */
463 panfrost_batch_add_dep(batch, access->writer);
464
465 /* The previous access was a write, there's no reason
466 * to have entries in the readers array.
467 */
468 assert(!util_dynarray_num_elements(&access->readers,
469 struct panfrost_batch_fence *));
470
471 /* Add ourselves to the readers array. */
472 panfrost_batch_fence_reference(batch->out_sync);
473 util_dynarray_append(&access->readers,
474 struct panfrost_batch_fence *,
475 batch->out_sync);
476 access->writer = NULL;
477 }
478 } else {
479 /* We already accessed this BO before, so we should already be
480 * in the reader array.
481 */
482 #ifdef PAN_BATCH_DEBUG
483 if (already_accessed) {
484 assert(panfrost_batch_in_readers(batch, access));
485 return;
486 }
487 #endif
488
489 /* Previous access was a read and we want to read this BO.
490 * Add ourselves to the readers array and add a dependency on
491 * the previous writer if any.
492 */
493 panfrost_batch_fence_reference(batch->out_sync);
494 util_dynarray_append(&access->readers,
495 struct panfrost_batch_fence *,
496 batch->out_sync);
497
498 if (access->writer)
499 panfrost_batch_add_dep(batch, access->writer);
500 }
501 }
502
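/* Add a BO to the batch with the given access flags. The first time a BO is
 * added the batch takes a reference; later calls just merge the flags. Shared
 * BOs also go through the dependency tracking above, except for the wallpaper
 * batch whose dependencies have already been flushed. */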
503 void
504 panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
505 uint32_t flags)
506 {
507 if (!bo)
508 return;
509
510 struct hash_entry *entry;
511 uint32_t old_flags = 0;
512
513 entry = _mesa_hash_table_search(batch->bos, bo);
514 if (!entry) {
515 entry = _mesa_hash_table_insert(batch->bos, bo,
516 (void *)(uintptr_t)flags);
517 panfrost_bo_reference(bo);
518 } else {
519 old_flags = (uintptr_t)entry->data;
520
521 /* All batches have to agree on the shared flag. */
522 assert((old_flags & PAN_BO_ACCESS_SHARED) ==
523 (flags & PAN_BO_ACCESS_SHARED));
524 }
525
526 assert(entry);
527
528 if (old_flags == flags)
529 return;
530
531 flags |= old_flags;
532 entry->data = (void *)(uintptr_t)flags;
533
534 /* If this is not a shared BO, we don't really care about dependency
535 * tracking.
536 */
537 if (!(flags & PAN_BO_ACCESS_SHARED))
538 return;
539
540 /* All dependencies should have been flushed before we execute the
541 * wallpaper draw, so it should be harmless to skip the
542 * update_bo_access() call.
543 */
544 if (batch == batch->ctx->wallpaper_batch)
545 return;
546
547 assert(flags & PAN_BO_ACCESS_RW);
548 panfrost_batch_update_bo_access(batch, bo, flags & PAN_BO_ACCESS_WRITE,
549 old_flags != 0);
550 }
551
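/* Add all BOs backing a resource: the main BO, the per-level checksum BOs and
 * the separate stencil BO if any. */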
552 static void
553 panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
554 struct panfrost_resource *rsrc,
555 uint32_t flags)
556 {
557 panfrost_batch_add_bo(batch, rsrc->bo, flags);
558
559 for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
560 if (rsrc->slices[i].checksum_bo)
561 panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);
562
563 if (rsrc->separate_stencil)
564 panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
565 }
566
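/* Add the batch's render targets (color buffers and Z/S buffer) as shared,
 * written BOs accessed by both the vertex/tiler and fragment jobs. */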
567 static void
568 panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
569 {
570 uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
571 PAN_BO_ACCESS_VERTEX_TILER |
572 PAN_BO_ACCESS_FRAGMENT;
573
574 for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
575 struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
576 panfrost_batch_add_resource_bos(batch, rsrc, flags);
577 }
578
579 if (batch->key.zsbuf) {
580 struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
581 panfrost_batch_add_resource_bos(batch, rsrc, flags);
582 }
583 }
584
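/* Create a BO owned by the batch and add it with the given access flags.
 * Typical usage (sketch):
 *
 *    struct panfrost_bo *bo =
 *            panfrost_batch_create_bo(batch, size, PAN_BO_INVISIBLE,
 *                                     PAN_BO_ACCESS_PRIVATE |
 *                                     PAN_BO_ACCESS_RW |
 *                                     PAN_BO_ACCESS_VERTEX_TILER);
 */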
585 struct panfrost_bo *
586 panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
587 uint32_t create_flags, uint32_t access_flags)
588 {
589 struct panfrost_bo *bo;
590
591 bo = panfrost_bo_create(pan_device(batch->ctx->base.screen), size,
592 create_flags);
593 panfrost_batch_add_bo(batch, bo, access_flags);
594
595 /* panfrost_batch_add_bo() has retained a reference and
 596 * panfrost_bo_create() initializes the refcnt to 1, so let's
597 * unreference the BO here so it gets released when the batch is
598 * destroyed (unless it's retained by someone else in the meantime).
599 */
600 panfrost_bo_unreference(bo);
601 return bo;
602 }
603
604 /* Returns the polygon list's GPU address if available, or otherwise allocates
 605 * the polygon list. It's perfectly fast to allocate and free the BO directly,
606 * since we'll hit the BO cache and this is one-per-batch anyway. */
607
608 mali_ptr
609 panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
610 {
611 if (batch->polygon_list) {
612 assert(batch->polygon_list->size >= size);
613 } else {
614 /* Create the BO as invisible, as there's no reason to map */
615 size = util_next_power_of_two(size);
616
617 batch->polygon_list = panfrost_batch_create_bo(batch, size,
618 PAN_BO_INVISIBLE,
619 PAN_BO_ACCESS_PRIVATE |
620 PAN_BO_ACCESS_RW |
621 PAN_BO_ACCESS_VERTEX_TILER |
622 PAN_BO_ACCESS_FRAGMENT);
623 }
624
625 return batch->polygon_list->gpu;
626 }
627
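/* Lazily allocate the batch's scratchpad (thread-local storage) BO, sized
 * from the stack shift, per-thread TLS allocation and core count. */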
628 struct panfrost_bo *
629 panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
630 unsigned shift,
631 unsigned thread_tls_alloc,
632 unsigned core_count)
633 {
634 unsigned size = panfrost_get_total_stack_size(shift,
635 thread_tls_alloc,
636 core_count);
637
638 if (batch->scratchpad) {
639 assert(batch->scratchpad->size >= size);
640 } else {
641 batch->scratchpad = panfrost_batch_create_bo(batch, size,
642 PAN_BO_INVISIBLE,
643 PAN_BO_ACCESS_PRIVATE |
644 PAN_BO_ACCESS_RW |
645 PAN_BO_ACCESS_VERTEX_TILER |
646 PAN_BO_ACCESS_FRAGMENT);
647 }
648
649 return batch->scratchpad;
650 }
651
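/* Lazily allocate the BO backing shared (workgroup) memory. */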
652 struct panfrost_bo *
653 panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
654 unsigned size,
655 unsigned workgroup_count)
656 {
657 if (batch->shared_memory) {
658 assert(batch->shared_memory->size >= size);
659 } else {
660 batch->shared_memory = panfrost_batch_create_bo(batch, size,
661 PAN_BO_INVISIBLE,
662 PAN_BO_ACCESS_PRIVATE |
663 PAN_BO_ACCESS_RW |
664 PAN_BO_ACCESS_VERTEX_TILER);
665 }
666
667 return batch->shared_memory;
668 }
669
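/* Lazily allocate the GPU-invisible, growable tiler heap shared by all tiler
 * work in the batch. */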
670 struct panfrost_bo *
671 panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
672 {
673 if (batch->tiler_heap)
674 return batch->tiler_heap;
675
676 batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
677 PAN_BO_INVISIBLE |
678 PAN_BO_GROWABLE,
679 PAN_BO_ACCESS_PRIVATE |
680 PAN_BO_ACCESS_RW |
681 PAN_BO_ACCESS_VERTEX_TILER |
682 PAN_BO_ACCESS_FRAGMENT);
683 assert(batch->tiler_heap);
684 return batch->tiler_heap;
685 }
686
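/* Upload the Bifrost tiler descriptors (heap metadata + tiler metadata) once
 * per batch and return their GPU address, or 0 when there is no geometry. */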
687 mali_ptr
688 panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
689 {
690 if (!vertex_count)
691 return 0;
692
693 if (batch->tiler_meta)
694 return batch->tiler_meta;
695
696 struct panfrost_bo *tiler_heap;
697 tiler_heap = panfrost_batch_get_tiler_heap(batch);
698
699 struct bifrost_tiler_heap_meta tiler_heap_meta = {
700 .heap_size = tiler_heap->size,
701 .tiler_heap_start = tiler_heap->gpu,
702 .tiler_heap_free = tiler_heap->gpu,
703 .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
704 .unk1 = 0x1,
705 .unk7e007e = 0x7e007e,
706 };
707
708 struct bifrost_tiler_meta tiler_meta = {
709 .hierarchy_mask = 0x28,
710 .flags = 0x0,
711 .width = MALI_POSITIVE(batch->key.width),
712 .height = MALI_POSITIVE(batch->key.height),
713 .tiler_heap_meta = panfrost_pool_upload(&batch->pool, &tiler_heap_meta, sizeof(tiler_heap_meta)),
714 };
715
716 batch->tiler_meta = panfrost_pool_upload(&batch->pool, &tiler_meta, sizeof(tiler_meta));
717 return batch->tiler_meta;
718 }
719
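/* Lazily allocate a small dummy tiler BO. It stays CPU-visible on GPUs with
 * the MIDGARD_NO_HIER_TILING quirk and is created invisible otherwise. */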
720 struct panfrost_bo *
721 panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
722 {
723 struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
724
725 uint32_t create_flags = 0;
726
727 if (batch->tiler_dummy)
728 return batch->tiler_dummy;
729
730 if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
731 create_flags = PAN_BO_INVISIBLE;
732
733 batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
734 create_flags,
735 PAN_BO_ACCESS_PRIVATE |
736 PAN_BO_ACCESS_RW |
737 PAN_BO_ACCESS_VERTEX_TILER |
738 PAN_BO_ACCESS_FRAGMENT);
739 assert(batch->tiler_dummy);
740 return batch->tiler_dummy;
741 }
742
743 mali_ptr
744 panfrost_batch_reserve_framebuffer(struct panfrost_batch *batch)
745 {
746 struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
747
748 /* If we haven't, reserve space for the framebuffer */
749
750 if (!batch->framebuffer.gpu) {
751 unsigned size = (dev->quirks & MIDGARD_SFBD) ?
752 sizeof(struct mali_single_framebuffer) :
753 sizeof(struct mali_framebuffer);
754
755 batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);
756
757 /* Tag the pointer */
758 if (!(dev->quirks & MIDGARD_SFBD))
759 batch->framebuffer.gpu |= MALI_MFBD;
760 }
761
762 return batch->framebuffer.gpu;
763 }
764
765
766
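/* Reload the still-valid contents of a render target, clamped to the damage
 * extent, so partial updates don't lose previously rendered pixels. Emits one
 * quad per inverted damage rectangle (or a wallpaper blit on Bifrost). */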
767 static void
768 panfrost_load_surface(struct panfrost_batch *batch, struct pipe_surface *surf, unsigned loc)
769 {
770 if (!surf)
771 return;
772
773 struct panfrost_resource *rsrc = pan_resource(surf->texture);
774 unsigned level = surf->u.tex.level;
775
776 if (!rsrc->slices[level].initialized)
777 return;
778
779 if (!rsrc->damage.inverted_len)
780 return;
781
782 /* Clamp the rendering area to the damage extent. The
783 * KHR_partial_update() spec states that trying to render outside of
784 * the damage region is "undefined behavior", so we should be safe.
785 */
786 unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
787 unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);
788
789 if (damage_width && damage_height) {
790 panfrost_batch_intersection_scissor(batch,
791 rsrc->damage.extent.minx,
792 rsrc->damage.extent.miny,
793 rsrc->damage.extent.maxx,
794 rsrc->damage.extent.maxy);
795 }
796
797 /* XXX: Native blits on Bifrost */
798 if (batch->pool.dev->quirks & IS_BIFROST) {
799 if (loc != FRAG_RESULT_DATA0)
800 return;
801
802 /* XXX: why align on *twice* the tile length? */
803 batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
804 batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
805 batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
806 rsrc->base.width0);
807 batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
808 rsrc->base.height0);
809
810 struct pipe_box rect;
811 batch->ctx->wallpaper_batch = batch;
812 u_box_2d(batch->minx, batch->miny, batch->maxx - batch->minx,
813 batch->maxy - batch->miny, &rect);
814 panfrost_blit_wallpaper(batch->ctx, &rect);
815 batch->ctx->wallpaper_batch = NULL;
816 return;
817 }
818
819 enum pipe_format format = rsrc->base.format;
820
821 if (loc == FRAG_RESULT_DEPTH) {
822 if (!util_format_has_depth(util_format_description(format)))
823 return;
824
825 format = util_format_get_depth_only(format);
826 } else if (loc == FRAG_RESULT_STENCIL) {
827 if (!util_format_has_stencil(util_format_description(format)))
828 return;
829
830 if (rsrc->separate_stencil) {
831 rsrc = rsrc->separate_stencil;
832 format = rsrc->base.format;
833 }
834
835 format = util_format_stencil_only(format);
836 }
837
838 enum mali_texture_dimension dim =
839 panfrost_translate_texture_dimension(rsrc->base.target);
840
841 struct pan_image img = {
842 .width0 = rsrc->base.width0,
843 .height0 = rsrc->base.height0,
844 .depth0 = rsrc->base.depth0,
845 .format = format,
846 .dim = dim,
847 .modifier = rsrc->modifier,
848 .array_size = rsrc->base.array_size,
849 .first_level = level,
850 .last_level = level,
851 .first_layer = surf->u.tex.first_layer,
852 .last_layer = surf->u.tex.last_layer,
853 .nr_samples = rsrc->base.nr_samples,
854 .cubemap_stride = rsrc->cubemap_stride,
855 .bo = rsrc->bo,
856 .slices = rsrc->slices
857 };
858
859 mali_ptr blend_shader = 0;
860
861 if (loc >= FRAG_RESULT_DATA0 && !panfrost_can_fixed_blend(rsrc->base.format)) {
862 struct panfrost_blend_shader *b =
863 panfrost_get_blend_shader(batch->ctx, &batch->ctx->blit_blend, rsrc->base.format, loc - FRAG_RESULT_DATA0);
864
865 struct panfrost_bo *bo = panfrost_batch_create_bo(batch, b->size,
866 PAN_BO_EXECUTE,
867 PAN_BO_ACCESS_PRIVATE |
868 PAN_BO_ACCESS_READ |
869 PAN_BO_ACCESS_FRAGMENT);
870
871 memcpy(bo->cpu, b->buffer, b->size);
872 assert(b->work_count <= 4);
873
874 blend_shader = bo->gpu | b->first_tag;
875 }
876
877 struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
878 4 * 4 * 6 * rsrc->damage.inverted_len);
879
880 for (unsigned i = 0; i < rsrc->damage.inverted_len; ++i) {
881 float *o = (float *) (transfer.cpu + (4 * 4 * 6 * i));
882 struct pan_rect r = rsrc->damage.inverted_rects[i];
883
884 float rect[] = {
885 r.minx, rsrc->base.height0 - r.miny, 0.0, 1.0,
886 r.maxx, rsrc->base.height0 - r.miny, 0.0, 1.0,
887 r.minx, rsrc->base.height0 - r.maxy, 0.0, 1.0,
888
889 r.maxx, rsrc->base.height0 - r.miny, 0.0, 1.0,
890 r.minx, rsrc->base.height0 - r.maxy, 0.0, 1.0,
891 r.maxx, rsrc->base.height0 - r.maxy, 0.0, 1.0,
892 };
893
894 assert(sizeof(rect) == 4 * 4 * 6);
895 memcpy(o, rect, sizeof(rect));
896 }
897
898 panfrost_load_midg(&batch->pool, &batch->scoreboard,
899 blend_shader,
900 batch->framebuffer.gpu, transfer.gpu,
901 rsrc->damage.inverted_len * 6,
902 &img, loc);
903
904 panfrost_batch_add_bo(batch, batch->pool.dev->blit_shaders.bo,
905 PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ | PAN_BO_ACCESS_FRAGMENT);
906 }
907
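/* Reload every render target that is drawn but not cleared, so its previous
 * contents survive into this batch. */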
908 static void
909 panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
910 {
911 panfrost_batch_reserve_framebuffer(batch);
912
913 /* Assume combined. If either depth or stencil is written, they will
 914 * both be written, so we need to be careful to reload both */
915
916 unsigned draws = batch->draws;
917
918 if (draws & PIPE_CLEAR_DEPTHSTENCIL)
919 draws |= PIPE_CLEAR_DEPTHSTENCIL;
920
921 /* Mask of buffers which need reload since they are not cleared and
922 * they are drawn. (If they are cleared, reload is useless; if they are
923 * not drawn and also not cleared, we can generally omit the attachment
 924 * at the framebuffer descriptor level.) */
925
926 unsigned reload = ~batch->clear & draws;
927
928 for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
929 if (reload & (PIPE_CLEAR_COLOR0 << i))
930 panfrost_load_surface(batch, batch->key.cbufs[i], FRAG_RESULT_DATA0 + i);
931 }
932
933 if (reload & PIPE_CLEAR_DEPTH)
934 panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_DEPTH);
935
936 if (reload & PIPE_CLEAR_STENCIL)
937 panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_STENCIL);
938 }
939
940 static void
941 panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
942 {
943 struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
944 uint32_t flags = (uintptr_t)entry->data;
945
946 assert(bo->gem_handle > 0);
947 bo_handles[idx] = bo->gem_handle;
948
949 /* Update the BO access flags so that panfrost_bo_wait() knows
950 * about all pending accesses.
951 * We only keep the READ/WRITE info since this is all the BO
952 * wait logic cares about.
953 * We also preserve existing flags as this batch might not
954 * be the first one to access the BO.
955 */
956 bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
957 }
958
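/* Issue the DRM_IOCTL_PANFROST_SUBMIT ioctl for one job chain: collect every
 * BO referenced by the batch (including the pool's BOs), submit, and
 * optionally wait and decode the job when tracing/sync debugging is on. */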
959 static int
960 panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
961 mali_ptr first_job_desc,
962 uint32_t reqs,
963 uint32_t out_sync)
964 {
965 struct panfrost_context *ctx = batch->ctx;
966 struct pipe_context *gallium = (struct pipe_context *) ctx;
967 struct panfrost_device *dev = pan_device(gallium->screen);
968 struct drm_panfrost_submit submit = {0,};
969 uint32_t *bo_handles;
970 int ret;
971
972 /* If we trace, we always need a syncobj, so make one of our own if we
973 * weren't given one to use. Remember that we did so, so we can free it
 974 * after we're done, while preventing double-frees if we were given a
975 * syncobj */
976
977 bool our_sync = false;
978
979 if (!out_sync && dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
980 drmSyncobjCreate(dev->fd, 0, &out_sync);
 981 our_sync = true;
982 }
983
984 submit.out_sync = out_sync;
985 submit.jc = first_job_desc;
986 submit.requirements = reqs;
987
988 bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
989 assert(bo_handles);
990
991 hash_table_foreach(batch->bos, entry)
992 panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
993
994 hash_table_foreach(batch->pool.bos, entry)
995 panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
996
997 submit.bo_handles = (u64) (uintptr_t) bo_handles;
998 ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
999 free(bo_handles);
1000
1001 if (ret) {
1002 if (dev->debug & PAN_DBG_MSGS)
1003 fprintf(stderr, "Error submitting: %m\n");
1004
1005 return errno;
1006 }
1007
1008 /* Trace the job if we're doing that */
1009 if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
1010 /* Wait so we can get errors reported back */
1011 drmSyncobjWait(dev->fd, &out_sync, 1,
1012 INT64_MAX, 0, NULL);
1013
1014 /* Trace gets priority over sync */
1015 bool minimal = !(dev->debug & PAN_DBG_TRACE);
1016 pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
1017 }
1018
1019 /* Cleanup if we created the syncobj */
1020 if (our_sync)
1021 drmSyncobjDestroy(dev->fd, out_sync);
1022
1023 return 0;
1024 }
1025
1026 /* Submit both vertex/tiler and fragment jobs for a batch, possibly with an
1027 * outsync corresponding to the later of the two (since there will be an
1028 * implicit dep between them) */
1029
1030 static int
1031 panfrost_batch_submit_jobs(struct panfrost_batch *batch, uint32_t out_sync)
1032 {
1033 bool has_draws = batch->scoreboard.first_job;
1034 bool has_frag = batch->scoreboard.tiler_dep || batch->clear;
1035 int ret = 0;
1036
1037 if (has_draws) {
1038 ret = panfrost_batch_submit_ioctl(batch, batch->scoreboard.first_job,
1039 0, has_frag ? 0 : out_sync);
1040 assert(!ret);
1041 }
1042
1043 if (has_frag) {
1044 /* Whether we program the fragment job for draws or not depends
1045 * on whether there is any *tiler* activity (so fragment
1046 * shaders). If there are draws but entirely RASTERIZER_DISCARD
1047 * (say, for transform feedback), we want a fragment job that
1048 * *only* clears, since otherwise the tiler structures will be
1049 * uninitialized leading to faults (or state leaks) */
1050
1051 mali_ptr fragjob = panfrost_fragment_job(batch,
1052 batch->scoreboard.tiler_dep != 0);
1053 ret = panfrost_batch_submit_ioctl(batch, fragjob,
1054 PANFROST_JD_REQ_FS, out_sync);
1055 assert(!ret);
1056 }
1057
1058 return ret;
1059 }
1060
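/* Flush one batch: submit its dependencies first, draw the wallpaper (FB
 * reloads), attach the framebuffer descriptor and tiler structures, then
 * submit the vertex/tiler and fragment jobs. The batch is frozen and freed
 * afterwards whether or not anything was submitted. */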
1061 static void
1062 panfrost_batch_submit(struct panfrost_batch *batch, uint32_t out_sync)
1063 {
1064 assert(batch);
1065 struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
1066
1067 /* Submit the dependencies first. Don't pass along the out_sync since
1068 * they are guaranteed to terminate sooner */
1069 util_dynarray_foreach(&batch->dependencies,
1070 struct panfrost_batch_fence *, dep) {
1071 if ((*dep)->batch)
1072 panfrost_batch_submit((*dep)->batch, 0);
1073 }
1074
1075 int ret;
1076
1077 /* Nothing to do! */
1078 if (!batch->scoreboard.first_job && !batch->clear) {
1079 if (out_sync)
1080 drmSyncobjSignal(dev->fd, &out_sync, 1);
1081 goto out;
1082 }
1083
1084 panfrost_batch_draw_wallpaper(batch);
1085
1086 /* Now that all draws are in, we can finally prepare the
1087 * FBD for the batch */
1088
1089 if (batch->framebuffer.gpu && batch->scoreboard.first_job) {
1090 struct panfrost_context *ctx = batch->ctx;
1091 struct pipe_context *gallium = (struct pipe_context *) ctx;
1092 struct panfrost_device *dev = pan_device(gallium->screen);
1093
1094 if (dev->quirks & MIDGARD_SFBD)
1095 panfrost_attach_sfbd(batch, ~0);
1096 else
1097 panfrost_attach_mfbd(batch, ~0);
1098 }
1099
1100 mali_ptr polygon_list = panfrost_batch_get_polygon_list(batch,
1101 MALI_TILER_MINIMUM_HEADER_SIZE);
1102
1103 panfrost_scoreboard_initialize_tiler(&batch->pool, &batch->scoreboard, polygon_list);
1104
1105 ret = panfrost_batch_submit_jobs(batch, out_sync);
1106
1107 if (ret && dev->debug & PAN_DBG_MSGS)
1108 fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
1109
1110 /* We must reset the damage info of our render targets here even
1111 * though a damage reset normally happens when the DRI layer swaps
1112 * buffers. That's because there can be implicit flushes the GL
1113 * app is not aware of, and those might impact the damage region: if
1114 * part of the damaged portion is drawn during those implicit flushes,
1115 * you have to reload those areas before next draws are pushed, and
1116 * since the driver can't easily know what's been modified by the draws
1117 * it flushed, the easiest solution is to reload everything.
1118 */
1119 for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
1120 if (!batch->key.cbufs[i])
1121 continue;
1122
1123 panfrost_resource_set_damage_region(NULL,
1124 batch->key.cbufs[i]->texture, 0, NULL);
1125 }
1126
1127 out:
1128 panfrost_freeze_batch(batch);
1129 panfrost_free_batch(batch);
1130 }
1131
1132 /* Submit all batches, applying the out_sync to the currently bound batch */
1133
1134 void
1135 panfrost_flush_all_batches(struct panfrost_context *ctx, uint32_t out_sync)
1136 {
1137 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
1138 panfrost_batch_submit(batch, out_sync);
1139
1140 hash_table_foreach(ctx->batches, hentry) {
1141 struct panfrost_batch *batch = hentry->data;
1142 assert(batch);
1143
1144 panfrost_batch_submit(batch, 0);
1145 }
1146
1147 assert(!ctx->batches->entries);
1148
1149 /* Collect batch fences before returning */
1150 panfrost_gc_fences(ctx);
1151 }
1152
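/* Check whether any not-yet-submitted batch reads or writes the given BO. */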
1153 bool
1154 panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
1155 const struct panfrost_bo *bo)
1156 {
1157 struct panfrost_bo_access *access;
1158 struct hash_entry *hentry;
1159
1160 hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
1161 access = hentry ? hentry->data : NULL;
1162 if (!access)
1163 return false;
1164
1165 if (access->writer && access->writer->batch)
1166 return true;
1167
1168 util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
1169 reader) {
1170 if (*reader && (*reader)->batch)
1171 return true;
1172 }
1173
1174 return false;
1175 }
1176
1177 /* We always flush writers. We might also need to flush readers */
1178
1179 void
1180 panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
1181 struct panfrost_bo *bo,
1182 bool flush_readers)
1183 {
1184 struct panfrost_bo_access *access;
1185 struct hash_entry *hentry;
1186
1187 hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
1188 access = hentry ? hentry->data : NULL;
1189 if (!access)
1190 return;
1191
1192 if (access->writer && access->writer->batch)
1193 panfrost_batch_submit(access->writer->batch, 0);
1194
1195 if (!flush_readers)
1196 return;
1197
1198 util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
1199 reader) {
1200 if (*reader && (*reader)->batch)
1201 panfrost_batch_submit((*reader)->batch, 0);
1202 }
1203 }
1204
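/* Derive batch requirements from the current state: MSAA from the rasterizer,
 * and depth/stencil writes, which also mark those buffers as drawn. */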
1205 void
1206 panfrost_batch_set_requirements(struct panfrost_batch *batch)
1207 {
1208 struct panfrost_context *ctx = batch->ctx;
1209
1210 if (ctx->rasterizer->base.multisample)
1211 batch->requirements |= PAN_REQ_MSAA;
1212
1213 if (ctx->depth_stencil && ctx->depth_stencil->base.depth.writemask) {
1214 batch->requirements |= PAN_REQ_DEPTH_WRITE;
1215 batch->draws |= PIPE_CLEAR_DEPTH;
1216 }
1217
1218 if (ctx->depth_stencil && ctx->depth_stencil->base.stencil[0].enabled)
1219 batch->draws |= PIPE_CLEAR_STENCIL;
1220 }
1221
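/* Grow the batch's stack size to cover every currently bound shader stage. */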
1222 void
1223 panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
1224 {
1225 struct panfrost_context *ctx = batch->ctx;
1226
1227 for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
1228 struct panfrost_shader_state *ss;
1229
1230 ss = panfrost_get_shader_state(ctx, i);
1231 if (!ss)
1232 continue;
1233
1234 batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
1235 }
1236 }
1237
1238 /* Helper to smear a 32-bit color across 128-bit components */
1239
1240 static void
1241 pan_pack_color_32(uint32_t *packed, uint32_t v)
1242 {
1243 for (unsigned i = 0; i < 4; ++i)
1244 packed[i] = v;
1245 }
1246
1247 static void
1248 pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
1249 {
1250 for (unsigned i = 0; i < 4; i += 2) {
1251 packed[i + 0] = lo;
1252 packed[i + 1] = hi;
1253 }
1254 }
1255
1256 static void
1257 pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
1258 {
1259 /* Alpha magicked to 1.0 if there is no alpha */
1260
1261 bool has_alpha = util_format_has_alpha(format);
1262 float clear_alpha = has_alpha ? color->f[3] : 1.0f;
1263
1264 /* Packed color depends on the framebuffer format */
1265
1266 const struct util_format_description *desc =
1267 util_format_description(format);
1268
1269 if (util_format_is_rgba8_variant(desc) && desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
1270 pan_pack_color_32(packed,
1271 ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
1272 ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
1273 ((uint32_t) float_to_ubyte(color->f[1]) << 8) |
1274 ((uint32_t) float_to_ubyte(color->f[0]) << 0));
1275 } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
1276 /* First, we convert the components to R5, G6, B5 separately */
1277 unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
1278 unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
1279 unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
1280
1281 /* Then we pack into a sparse u32. TODO: Why these shifts? */
1282 pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
1283 } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
1284 /* Convert to 4-bits */
1285 unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
1286 unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
1287 unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
1288 unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);
1289
1290 /* Pack on *byte* intervals */
1291 pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
1292 } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
1293 /* Scale as expected but shift oddly */
1294 unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
1295 unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
1296 unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
1297 unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);
1298
1299 pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
1300 } else {
1301 /* Otherwise, it's generic subject to replication */
1302
1303 union util_color out = { 0 };
1304 unsigned size = util_format_get_blocksize(format);
1305
1306 util_pack_color(color->f, format, &out);
1307
1308 if (size == 1) {
1309 unsigned b = out.ui[0];
1310 unsigned s = b | (b << 8);
1311 pan_pack_color_32(packed, s | (s << 16));
1312 } else if (size == 2)
1313 pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
1314 else if (size == 3 || size == 4)
1315 pan_pack_color_32(packed, out.ui[0]);
1316 else if (size == 6)
1317 pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
1318 else if (size == 8)
1319 pan_pack_color_64(packed, out.ui[0], out.ui[1]);
1320 else if (size == 16)
1321 memcpy(packed, out.ui, 16);
1322 else
1323 unreachable("Unknown generic format size packing clear colour");
1324 }
1325 }
1326
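/* Record a clear on the batch: pack the clear colour for each targeted render
 * target, stash the depth/stencil values and extend the batch scissor to the
 * whole framebuffer. */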
1327 void
1328 panfrost_batch_clear(struct panfrost_batch *batch,
1329 unsigned buffers,
1330 const union pipe_color_union *color,
1331 double depth, unsigned stencil)
1332 {
1333 struct panfrost_context *ctx = batch->ctx;
1334
1335 if (buffers & PIPE_CLEAR_COLOR) {
1336 for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
1337 if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
1338 continue;
1339
1340 enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
1341 pan_pack_color(batch->clear_color[i], color, format);
1342 }
1343 }
1344
1345 if (buffers & PIPE_CLEAR_DEPTH) {
1346 batch->clear_depth = depth;
1347 }
1348
1349 if (buffers & PIPE_CLEAR_STENCIL) {
1350 batch->clear_stencil = stencil;
1351 }
1352
1353 batch->clear |= buffers;
1354
1355 /* Clearing affects the entire framebuffer (by definition -- this is
1356 * the Gallium clear callback, which clears the whole framebuffer. If
1357 * the scissor test were enabled from the GL side, the gallium frontend
1358 * would emit a quad instead and we wouldn't go down this code path) */
1359
1360 panfrost_batch_union_scissor(batch, 0, 0,
1361 ctx->pipe_framebuffer.width,
1362 ctx->pipe_framebuffer.height);
1363 }
1364
1365 static bool
1366 panfrost_batch_compare(const void *a, const void *b)
1367 {
1368 return util_framebuffer_state_equal(a, b);
1369 }
1370
1371 static uint32_t
1372 panfrost_batch_hash(const void *key)
1373 {
1374 return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
1375 }
1376
1377 /* Given a new bounding rectangle (scissor), let the job cover the union of the
1378 * new and old bounding rectangles */
1379
1380 void
1381 panfrost_batch_union_scissor(struct panfrost_batch *batch,
1382 unsigned minx, unsigned miny,
1383 unsigned maxx, unsigned maxy)
1384 {
1385 batch->minx = MIN2(batch->minx, minx);
1386 batch->miny = MIN2(batch->miny, miny);
1387 batch->maxx = MAX2(batch->maxx, maxx);
1388 batch->maxy = MAX2(batch->maxy, maxy);
1389 }
1390
1391 void
1392 panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
1393 unsigned minx, unsigned miny,
1394 unsigned maxx, unsigned maxy)
1395 {
1396 batch->minx = MAX2(batch->minx, minx);
1397 batch->miny = MAX2(batch->miny, miny);
1398 batch->maxx = MIN2(batch->maxx, maxx);
1399 batch->maxy = MIN2(batch->maxy, maxy);
1400 }
1401
 1402 /* Are we currently rendering to the display (scanout) rather than an FBO? */
1403
1404 bool
1405 panfrost_batch_is_scanout(struct panfrost_batch *batch)
1406 {
1407 /* If there is no color buffer, it's an FBO */
1408 if (batch->key.nr_cbufs != 1)
1409 return false;
1410
 1411 /* If we're so early that no framebuffer has been set yet, assume scanout */
1412 if (!batch->key.cbufs[0])
1413 return true;
1414
1415 return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
1416 batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
1417 batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
1418 }
1419
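/* Per-context initialization of the batch machinery: the FBO -> batch map and
 * the BO access table used for dependency tracking. */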
1420 void
1421 panfrost_batch_init(struct panfrost_context *ctx)
1422 {
1423 ctx->batches = _mesa_hash_table_create(ctx,
1424 panfrost_batch_hash,
1425 panfrost_batch_compare);
1426 ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
1427 _mesa_key_pointer_equal);
1428 }