/*
 * Copyright (C) 2008 VMware, Inc.
 * Copyright (C) 2014 Broadcom
 * Copyright (C) 2018-2019 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Tomeu Vizoso <tomeu.vizoso@collabora.com>
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
34 #include "drm-uapi/drm_fourcc.h"
36 #include "state_tracker/winsys_handle.h"
37 #include "util/u_format.h"
38 #include "util/u_memory.h"
39 #include "util/u_surface.h"
40 #include "util/u_transfer.h"
41 #include "util/u_transfer_helper.h"
42 #include "util/u_gen_mipmap.h"
44 #include "pan_context.h"
45 #include "pan_screen.h"
46 #include "pan_resource.h"
48 #include "pan_tiling.h"
50 static struct pipe_resource
*
51 panfrost_resource_from_handle(struct pipe_screen
*pscreen
,
52 const struct pipe_resource
*templat
,
53 struct winsys_handle
*whandle
,
56 struct panfrost_screen
*screen
= pan_screen(pscreen
);
57 struct panfrost_resource
*rsc
;
58 struct pipe_resource
*prsc
;
60 assert(whandle
->type
== WINSYS_HANDLE_TYPE_FD
);
62 rsc
= rzalloc(pscreen
, struct panfrost_resource
);
70 pipe_reference_init(&prsc
->reference
, 1);
71 prsc
->screen
= pscreen
;
73 rsc
->bo
= panfrost_drm_import_bo(screen
, whandle
);
74 rsc
->bo
->slices
[0].stride
= whandle
->stride
;
75 rsc
->bo
->slices
[0].initialized
= true;
79 renderonly_create_gpu_import_for_resource(prsc
, screen
->ro
, NULL
);
80 /* failure is expected in some cases.. */
87 panfrost_resource_get_handle(struct pipe_screen
*pscreen
,
88 struct pipe_context
*ctx
,
89 struct pipe_resource
*pt
,
90 struct winsys_handle
*handle
,
93 struct panfrost_screen
*screen
= pan_screen(pscreen
);
94 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) pt
;
95 struct renderonly_scanout
*scanout
= rsrc
->scanout
;
97 handle
->modifier
= DRM_FORMAT_MOD_INVALID
;
99 if (handle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
101 } else if (handle
->type
== WINSYS_HANDLE_TYPE_KMS
) {
102 if (renderonly_get_handle(scanout
, handle
))
105 handle
->handle
= rsrc
->bo
->gem_handle
;
106 handle
->stride
= rsrc
->bo
->slices
[0].stride
;
108 } else if (handle
->type
== WINSYS_HANDLE_TYPE_FD
) {
110 struct drm_prime_handle args
= {
111 .handle
= scanout
->handle
,
112 .flags
= DRM_CLOEXEC
,
115 int ret
= drmIoctl(screen
->ro
->kms_fd
, DRM_IOCTL_PRIME_HANDLE_TO_FD
, &args
);
119 handle
->stride
= scanout
->stride
;
120 handle
->handle
= args
.fd
;
124 return panfrost_drm_export_bo(screen
, rsrc
->bo
->gem_handle
,
125 rsrc
->bo
->slices
[0].stride
,
/* Flush hook — currently a stub; jobs are flushed elsewhere. */
static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}
138 static struct pipe_surface
*
139 panfrost_create_surface(struct pipe_context
*pipe
,
140 struct pipe_resource
*pt
,
141 const struct pipe_surface
*surf_tmpl
)
143 struct pipe_surface
*ps
= NULL
;
145 ps
= rzalloc(pipe
, struct pipe_surface
);
148 pipe_reference_init(&ps
->reference
, 1);
149 pipe_resource_reference(&ps
->texture
, pt
);
151 ps
->format
= surf_tmpl
->format
;
153 if (pt
->target
!= PIPE_BUFFER
) {
154 assert(surf_tmpl
->u
.tex
.level
<= pt
->last_level
);
155 ps
->width
= u_minify(pt
->width0
, surf_tmpl
->u
.tex
.level
);
156 ps
->height
= u_minify(pt
->height0
, surf_tmpl
->u
.tex
.level
);
157 ps
->u
.tex
.level
= surf_tmpl
->u
.tex
.level
;
158 ps
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
159 ps
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
161 /* setting width as number of elements should get us correct renderbuffer width */
162 ps
->width
= surf_tmpl
->u
.buf
.last_element
- surf_tmpl
->u
.buf
.first_element
+ 1;
163 ps
->height
= pt
->height0
;
164 ps
->u
.buf
.first_element
= surf_tmpl
->u
.buf
.first_element
;
165 ps
->u
.buf
.last_element
= surf_tmpl
->u
.buf
.last_element
;
166 assert(ps
->u
.buf
.first_element
<= ps
->u
.buf
.last_element
);
167 assert(ps
->u
.buf
.last_element
< ps
->width
);
175 panfrost_surface_destroy(struct pipe_context
*pipe
,
176 struct pipe_surface
*surf
)
178 assert(surf
->texture
);
179 pipe_resource_reference(&surf
->texture
, NULL
);
183 /* Computes sizes for checksumming, which is 8 bytes per 16x16 tile */
185 #define CHECKSUM_TILE_WIDTH 16
186 #define CHECKSUM_TILE_HEIGHT 16
187 #define CHECKSUM_BYTES_PER_TILE 8
190 panfrost_compute_checksum_sizes(
191 struct panfrost_slice
*slice
,
195 unsigned aligned_width
= ALIGN(width
, CHECKSUM_TILE_WIDTH
);
196 unsigned aligned_height
= ALIGN(height
, CHECKSUM_TILE_HEIGHT
);
198 unsigned tile_count_x
= aligned_width
/ CHECKSUM_TILE_WIDTH
;
199 unsigned tile_count_y
= aligned_height
/ CHECKSUM_TILE_HEIGHT
;
201 slice
->checksum_stride
= tile_count_x
* CHECKSUM_BYTES_PER_TILE
;
203 return slice
->checksum_stride
* tile_count_y
;
206 /* Setup the mip tree given a particular layout, possibly with checksumming */
209 panfrost_setup_slices(const struct pipe_resource
*tmpl
, struct panfrost_bo
*bo
)
211 unsigned width
= tmpl
->width0
;
212 unsigned height
= tmpl
->height0
;
213 unsigned depth
= tmpl
->depth0
;
214 unsigned bytes_per_pixel
= util_format_get_blocksize(tmpl
->format
);
218 /* Tiled operates blockwise; linear is packed. Also, anything
219 * we render to has to be tile-aligned. Maybe not strictly
220 * necessary, but we're not *that* pressed for memory and it
221 * makes code a lot simpler */
223 bool renderable
= tmpl
->bind
&
224 (PIPE_BIND_RENDER_TARGET
| PIPE_BIND_DEPTH_STENCIL
);
225 bool afbc
= bo
->layout
== PAN_AFBC
;
226 bool tiled
= bo
->layout
== PAN_TILED
;
227 bool should_align
= renderable
|| tiled
;
229 /* We don't know how to specify a 2D stride for 3D textures */
231 bool can_align_stride
=
232 tmpl
->target
!= PIPE_TEXTURE_3D
;
234 should_align
&= can_align_stride
;
237 unsigned size_2d
= 0;
239 for (unsigned l
= 0; l
<= tmpl
->last_level
; ++l
) {
240 struct panfrost_slice
*slice
= &bo
->slices
[l
];
242 unsigned effective_width
= width
;
243 unsigned effective_height
= height
;
244 unsigned effective_depth
= depth
;
247 effective_width
= ALIGN(effective_width
, 16);
248 effective_height
= ALIGN(effective_height
, 16);
250 /* We don't need to align depth */
253 slice
->offset
= offset
;
255 /* Compute the would-be stride */
256 unsigned stride
= bytes_per_pixel
* effective_width
;
258 /* ..but cache-line align it for performance */
259 if (can_align_stride
&& bo
->layout
== PAN_LINEAR
)
260 stride
= ALIGN(stride
, 64);
262 slice
->stride
= stride
;
264 unsigned slice_one_size
= slice
->stride
* effective_height
;
265 unsigned slice_full_size
= slice_one_size
* effective_depth
;
267 /* Report 2D size for 3D texturing */
270 size_2d
= slice_one_size
;
272 /* Compute AFBC sizes if necessary */
275 panfrost_afbc_header_size(width
, height
);
277 offset
+= slice
->header_size
;
280 offset
+= slice_full_size
;
282 /* Add a checksum region if necessary */
283 if (bo
->checksummed
) {
284 slice
->checksum_offset
= offset
;
286 unsigned size
= panfrost_compute_checksum_sizes(
287 slice
, width
, height
);
292 width
= u_minify(width
, 1);
293 height
= u_minify(height
, 1);
294 depth
= u_minify(depth
, 1);
297 assert(tmpl
->array_size
);
299 if (tmpl
->target
!= PIPE_TEXTURE_3D
) {
300 /* Arrays and cubemaps have the entire miptree duplicated */
302 bo
->cubemap_stride
= ALIGN(offset
, 64);
303 bo
->size
= ALIGN(bo
->cubemap_stride
* tmpl
->array_size
, 4096);
305 /* 3D strides across the 2D layers */
306 assert(tmpl
->array_size
== 1);
308 bo
->cubemap_stride
= size_2d
;
309 bo
->size
= ALIGN(offset
, 4096);
313 static struct panfrost_bo
*
314 panfrost_create_bo(struct panfrost_screen
*screen
, const struct pipe_resource
*template)
316 struct panfrost_bo
*bo
= rzalloc(screen
, struct panfrost_bo
);
317 pipe_reference_init(&bo
->reference
, 1);
319 /* Based on the usage, figure out what storing will be used. There are
322 * Linear: the basic format, bad for memory bandwidth, bad for cache
323 * use. Zero-copy, though. Renderable.
325 * Tiled: Not compressed, but cache-optimized. Expensive to write into
326 * (due to software tiling), but cheap to sample from. Ideal for most
329 * AFBC: Compressed and renderable (so always desirable for non-scanout
330 * rendertargets). Cheap to sample from. The format is black box, so we
331 * can't read/write from software.
334 /* Tiling textures is almost always faster, unless we only use it once */
336 bool is_texture
= (template->bind
& PIPE_BIND_SAMPLER_VIEW
);
337 bool is_2d
= template->depth0
== 1 && template->array_size
== 1;
338 bool is_streaming
= (template->usage
!= PIPE_USAGE_STREAM
);
340 bool should_tile
= is_streaming
&& is_texture
&& is_2d
;
342 /* Depth/stencil can't be tiled, only linear or AFBC */
343 should_tile
&= !(template->bind
& PIPE_BIND_DEPTH_STENCIL
);
345 /* FBOs we would like to checksum, if at all possible */
346 bool can_checksum
= !(template->bind
& (PIPE_BIND_SCANOUT
| PIPE_BIND_SHARED
));
347 bool should_checksum
= template->bind
& PIPE_BIND_RENDER_TARGET
;
349 bo
->checksummed
= can_checksum
&& should_checksum
;
351 /* Set the layout appropriately */
352 bo
->layout
= should_tile
? PAN_TILED
: PAN_LINEAR
;
354 panfrost_setup_slices(template, bo
);
356 struct panfrost_memory mem
;
358 panfrost_drm_allocate_slab(screen
, &mem
, bo
->size
/ 4096, true, 0, 0, 0);
362 bo
->gem_handle
= mem
.gem_handle
;
367 static struct pipe_resource
*
368 panfrost_resource_create(struct pipe_screen
*screen
,
369 const struct pipe_resource
*template)
371 struct panfrost_resource
*so
= rzalloc(screen
, struct panfrost_resource
);
372 struct panfrost_screen
*pscreen
= (struct panfrost_screen
*) screen
;
374 so
->base
= *template;
375 so
->base
.screen
= screen
;
377 pipe_reference_init(&so
->base
.reference
, 1);
379 /* Make sure we're familiar */
380 switch (template->target
) {
382 case PIPE_TEXTURE_1D
:
383 case PIPE_TEXTURE_2D
:
384 case PIPE_TEXTURE_3D
:
385 case PIPE_TEXTURE_CUBE
:
386 case PIPE_TEXTURE_RECT
:
387 case PIPE_TEXTURE_2D_ARRAY
:
390 DBG("Unknown texture target %d\n", template->target
);
394 util_range_init(&so
->valid_buffer_range
);
396 if (template->bind
& PIPE_BIND_DISPLAY_TARGET
||
397 template->bind
& PIPE_BIND_SCANOUT
||
398 template->bind
& PIPE_BIND_SHARED
) {
399 struct pipe_resource scanout_templat
= *template;
400 struct renderonly_scanout
*scanout
;
401 struct winsys_handle handle
;
403 scanout
= renderonly_scanout_for_resource(&scanout_templat
,
404 pscreen
->ro
, &handle
);
408 assert(handle
.type
== WINSYS_HANDLE_TYPE_FD
);
409 /* TODO: handle modifiers? */
410 so
= pan_resource(screen
->resource_from_handle(screen
, template,
412 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE
));
413 close(handle
.handle
);
417 so
->scanout
= scanout
;
418 pscreen
->display_target
= so
;
420 so
->bo
= panfrost_create_bo(pscreen
, template);
423 return (struct pipe_resource
*)so
;
427 panfrost_destroy_bo(struct panfrost_screen
*screen
, struct panfrost_bo
*bo
)
430 panfrost_drm_free_imported_bo(screen
, bo
);
432 struct panfrost_memory mem
= {
436 .gem_handle
= bo
->gem_handle
,
439 panfrost_drm_free_slab(screen
, &mem
);
446 panfrost_bo_reference(struct panfrost_bo
*bo
)
448 pipe_reference(NULL
, &bo
->reference
);
452 panfrost_bo_unreference(struct pipe_screen
*screen
, struct panfrost_bo
*bo
)
454 /* When the reference count goes to zero, we need to cleanup */
456 if (pipe_reference(&bo
->reference
, NULL
)) {
457 panfrost_destroy_bo(pan_screen(screen
), bo
);
462 panfrost_resource_destroy(struct pipe_screen
*screen
,
463 struct pipe_resource
*pt
)
465 struct panfrost_screen
*pscreen
= pan_screen(screen
);
466 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) pt
;
469 renderonly_scanout_destroy(rsrc
->scanout
, pscreen
->ro
);
472 panfrost_bo_unreference(screen
, rsrc
->bo
);
474 util_range_destroy(&rsrc
->valid_buffer_range
);
479 panfrost_transfer_map(struct pipe_context
*pctx
,
480 struct pipe_resource
*resource
,
482 unsigned usage
, /* a combination of PIPE_TRANSFER_x */
483 const struct pipe_box
*box
,
484 struct pipe_transfer
**out_transfer
)
486 int bytes_per_pixel
= util_format_get_blocksize(resource
->format
);
487 struct panfrost_resource
*rsrc
= pan_resource(resource
);
488 struct panfrost_bo
*bo
= rsrc
->bo
;
490 struct panfrost_gtransfer
*transfer
= rzalloc(pctx
, struct panfrost_gtransfer
);
491 transfer
->base
.level
= level
;
492 transfer
->base
.usage
= usage
;
493 transfer
->base
.box
= *box
;
495 pipe_resource_reference(&transfer
->base
.resource
, resource
);
497 *out_transfer
= &transfer
->base
;
499 /* Check if we're bound for rendering and this is a read pixels. If so,
500 * we need to flush */
502 struct panfrost_context
*ctx
= pan_context(pctx
);
503 struct pipe_framebuffer_state
*fb
= &ctx
->pipe_framebuffer
;
505 bool is_bound
= false;
507 for (unsigned c
= 0; c
< fb
->nr_cbufs
; ++c
) {
508 is_bound
|= fb
->cbufs
[c
]->texture
== resource
;
511 if (is_bound
&& (usage
& PIPE_TRANSFER_READ
)) {
513 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
516 /* TODO: Respect usage flags */
518 if (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
519 /* TODO: reallocate */
520 //printf("debug: Missed reallocate\n");
521 } else if ((usage
& PIPE_TRANSFER_WRITE
)
522 && resource
->target
== PIPE_BUFFER
523 && !util_ranges_intersect(&rsrc
->valid_buffer_range
, box
->x
, box
->x
+ box
->width
)) {
524 /* No flush for writes to uninitialized */
525 } else if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
526 if (usage
& PIPE_TRANSFER_WRITE
) {
527 /* STUB: flush reading */
528 //printf("debug: missed reading flush %d\n", resource->target);
529 } else if (usage
& PIPE_TRANSFER_READ
) {
530 /* STUB: flush writing */
531 //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
533 /* Why are you even mapping?! */
537 if (bo
->layout
!= PAN_LINEAR
) {
538 /* Non-linear resources need to be indirectly mapped */
540 if (usage
& PIPE_TRANSFER_MAP_DIRECTLY
)
543 transfer
->base
.stride
= box
->width
* bytes_per_pixel
;
544 transfer
->base
.layer_stride
= transfer
->base
.stride
* box
->height
;
545 transfer
->map
= rzalloc_size(transfer
, transfer
->base
.layer_stride
* box
->depth
);
546 assert(box
->depth
== 1);
548 if ((usage
& PIPE_TRANSFER_READ
) && bo
->slices
[level
].initialized
) {
549 if (bo
->layout
== PAN_AFBC
) {
550 DBG("Unimplemented: reads from AFBC");
551 } else if (bo
->layout
== PAN_TILED
) {
552 panfrost_load_tiled_image(
554 bo
->cpu
+ bo
->slices
[level
].offset
,
556 transfer
->base
.stride
,
557 bo
->slices
[level
].stride
,
558 util_format_get_blocksize(resource
->format
));
562 return transfer
->map
;
564 transfer
->base
.stride
= bo
->slices
[level
].stride
;
565 transfer
->base
.layer_stride
= bo
->cubemap_stride
;
567 /* By mapping direct-write, we're implicitly already
568 * initialized (maybe), so be conservative */
570 if ((usage
& PIPE_TRANSFER_WRITE
) && (usage
& PIPE_TRANSFER_MAP_DIRECTLY
))
571 bo
->slices
[level
].initialized
= true;
574 + bo
->slices
[level
].offset
575 + transfer
->base
.box
.z
* bo
->cubemap_stride
576 + transfer
->base
.box
.y
* bo
->slices
[level
].stride
577 + transfer
->base
.box
.x
* bytes_per_pixel
;
582 panfrost_transfer_unmap(struct pipe_context
*pctx
,
583 struct pipe_transfer
*transfer
)
585 /* Gallium expects writeback here, so we tile */
587 struct panfrost_gtransfer
*trans
= pan_transfer(transfer
);
588 struct panfrost_resource
*prsrc
= (struct panfrost_resource
*) transfer
->resource
;
591 struct panfrost_bo
*bo
= prsrc
->bo
;
593 if (transfer
->usage
& PIPE_TRANSFER_WRITE
) {
594 unsigned level
= transfer
->level
;
595 bo
->slices
[level
].initialized
= true;
597 if (bo
->layout
== PAN_AFBC
) {
598 DBG("Unimplemented: writes to AFBC\n");
599 } else if (bo
->layout
== PAN_TILED
) {
600 assert(transfer
->box
.depth
== 1);
602 panfrost_store_tiled_image(
603 bo
->cpu
+ bo
->slices
[level
].offset
,
606 bo
->slices
[level
].stride
,
608 util_format_get_blocksize(prsrc
->base
.format
));
614 util_range_add(&prsrc
->valid_buffer_range
,
616 transfer
->box
.x
+ transfer
->box
.width
);
618 /* Derefence the resource */
619 pipe_resource_reference(&transfer
->resource
, NULL
);
621 /* Transfer itself is RALLOCed at the moment */
622 ralloc_free(transfer
);
626 panfrost_transfer_flush_region(struct pipe_context
*pctx
,
627 struct pipe_transfer
*transfer
,
628 const struct pipe_box
*box
)
630 struct panfrost_resource
*rsc
= pan_resource(transfer
->resource
);
632 if (transfer
->resource
->target
== PIPE_BUFFER
) {
633 util_range_add(&rsc
->valid_buffer_range
,
634 transfer
->box
.x
+ box
->x
,
635 transfer
->box
.x
+ box
->x
+ box
->width
);
639 static struct pb_slab
*
640 panfrost_slab_alloc(void *priv
, unsigned heap
, unsigned entry_size
, unsigned group_index
)
642 struct panfrost_screen
*screen
= (struct panfrost_screen
*) priv
;
643 struct panfrost_memory
*mem
= rzalloc(screen
, struct panfrost_memory
);
645 size_t slab_size
= (1 << (MAX_SLAB_ENTRY_SIZE
+ 1));
647 mem
->slab
.num_entries
= slab_size
/ entry_size
;
648 mem
->slab
.num_free
= mem
->slab
.num_entries
;
650 LIST_INITHEAD(&mem
->slab
.free
);
651 for (unsigned i
= 0; i
< mem
->slab
.num_entries
; ++i
) {
652 /* Create a slab entry */
653 struct panfrost_memory_entry
*entry
= rzalloc(mem
, struct panfrost_memory_entry
);
654 entry
->offset
= entry_size
* i
;
656 entry
->base
.slab
= &mem
->slab
;
657 entry
->base
.group_index
= group_index
;
659 LIST_ADDTAIL(&entry
->base
.head
, &mem
->slab
.free
);
662 /* Actually allocate the memory from kernel-space. Mapped, same_va, no
665 panfrost_drm_allocate_slab(screen
, mem
, slab_size
/ 4096, true, 0, 0, 0);
671 panfrost_slab_can_reclaim(void *priv
, struct pb_slab_entry
*entry
)
673 struct panfrost_memory_entry
*p_entry
= (struct panfrost_memory_entry
*) entry
;
674 return p_entry
->freed
;
/* pb_slabs callback: returns a whole slab's memory to the kernel. */
static void
panfrost_slab_free(void *priv, struct pb_slab *slab)
{
        struct panfrost_memory *mem = (struct panfrost_memory *) slab;
        struct panfrost_screen *screen = (struct panfrost_screen *) priv;

        panfrost_drm_free_slab(screen, mem);
        /* NOTE(review): final free of `mem` not visible in source — confirm */
        ralloc_free(mem);
}
/* Invalidation hook — currently a stub. */
static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}
693 static enum pipe_format
694 panfrost_resource_get_internal_format(struct pipe_resource
*prsrc
)
696 return prsrc
->format
;
700 panfrost_generate_mipmap(
701 struct pipe_context
*pctx
,
702 struct pipe_resource
*prsrc
,
703 enum pipe_format format
,
706 unsigned first_layer
,
709 struct panfrost_context
*ctx
= pan_context(pctx
);
710 struct panfrost_resource
*rsrc
= pan_resource(prsrc
);
712 /* Generating a mipmap invalidates the written levels, so make that
713 * explicit so we don't try to wallpaper them back and end up with
714 * u_blitter recursion */
717 for (unsigned l
= base_level
+ 1; l
<= last_level
; ++l
)
718 rsrc
->bo
->slices
[l
].initialized
= false;
720 /* Beyond that, we just delegate the hard stuff. We're careful to
721 * include flushes on both ends to make sure the data is really valid.
722 * We could be doing a lot better perf-wise, especially once we have
723 * reorder-type optimizations in place. But for now prioritize
726 struct panfrost_job
*job
= panfrost_get_job_for_fbo(ctx
);
727 bool has_draws
= job
->last_job
.gpu
;
730 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
732 /* We've flushed the original buffer if needed, now trigger a blit */
734 bool blit_res
= util_gen_mipmap(
736 base_level
, last_level
,
737 first_layer
, last_layer
,
738 PIPE_TEX_FILTER_LINEAR
);
740 /* If the blit was successful, flush once more. If it wasn't, well, let
741 * the state tracker deal with it. */
744 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
749 /* Computes the address to a texture at a particular slice */
752 panfrost_get_texture_address(
753 struct panfrost_resource
*rsrc
,
754 unsigned level
, unsigned face
)
756 unsigned level_offset
= rsrc
->bo
->slices
[level
].offset
;
757 unsigned face_offset
= face
* rsrc
->bo
->cubemap_stride
;
759 return rsrc
->bo
->gpu
+ level_offset
+ face_offset
;
763 panfrost_resource_set_stencil(struct pipe_resource
*prsrc
,
764 struct pipe_resource
*stencil
)
766 pan_resource(prsrc
)->separate_stencil
= pan_resource(stencil
);
769 static struct pipe_resource
*
770 panfrost_resource_get_stencil(struct pipe_resource
*prsrc
)
772 return &pan_resource(prsrc
)->separate_stencil
->base
;
775 static const struct u_transfer_vtbl transfer_vtbl
= {
776 .resource_create
= panfrost_resource_create
,
777 .resource_destroy
= panfrost_resource_destroy
,
778 .transfer_map
= panfrost_transfer_map
,
779 .transfer_unmap
= panfrost_transfer_unmap
,
780 .transfer_flush_region
= panfrost_transfer_flush_region
,
781 .get_internal_format
= panfrost_resource_get_internal_format
,
782 .set_stencil
= panfrost_resource_set_stencil
,
783 .get_stencil
= panfrost_resource_get_stencil
,
787 panfrost_resource_screen_init(struct panfrost_screen
*pscreen
)
789 //pscreen->base.resource_create_with_modifiers =
790 // panfrost_resource_create_with_modifiers;
791 pscreen
->base
.resource_create
= u_transfer_helper_resource_create
;
792 pscreen
->base
.resource_destroy
= u_transfer_helper_resource_destroy
;
793 pscreen
->base
.resource_from_handle
= panfrost_resource_from_handle
;
794 pscreen
->base
.resource_get_handle
= panfrost_resource_get_handle
;
795 pscreen
->base
.transfer_helper
= u_transfer_helper_create(&transfer_vtbl
,
799 pb_slabs_init(&pscreen
->slabs
,
803 3, /* Number of heaps */
807 panfrost_slab_can_reclaim
,
813 panfrost_resource_screen_deinit(struct panfrost_screen
*pscreen
)
815 pb_slabs_deinit(&pscreen
->slabs
);
819 panfrost_resource_context_init(struct pipe_context
*pctx
)
821 pctx
->transfer_map
= u_transfer_helper_transfer_map
;
822 pctx
->transfer_flush_region
= u_transfer_helper_transfer_flush_region
;
823 pctx
->transfer_unmap
= u_transfer_helper_transfer_unmap
;
824 pctx
->buffer_subdata
= u_default_buffer_subdata
;
825 pctx
->create_surface
= panfrost_create_surface
;
826 pctx
->surface_destroy
= panfrost_surface_destroy
;
827 pctx
->resource_copy_region
= util_resource_copy_region
;
828 pctx
->blit
= panfrost_blit
;
829 pctx
->generate_mipmap
= panfrost_generate_mipmap
;
830 pctx
->flush_resource
= panfrost_flush_resource
;
831 pctx
->invalidate_resource
= panfrost_invalidate_resource
;
832 pctx
->transfer_flush_region
= u_transfer_helper_transfer_flush_region
;
833 pctx
->buffer_subdata
= u_default_buffer_subdata
;
834 pctx
->texture_subdata
= u_default_texture_subdata
;