2 * Copyright (C) 2008 VMware, Inc.
3 * Copyright (C) 2014 Broadcom
4 * Copyright (C) 2018-2019 Alyssa Rosenzweig
5 * Copyright (C) 2019 Collabora, Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Authors (Collabora):
27 * Tomeu Vizoso <tomeu.vizoso@collabora.com>
28 * Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
34 #include "drm-uapi/drm_fourcc.h"
36 #include "state_tracker/winsys_handle.h"
37 #include "util/u_format.h"
38 #include "util/u_memory.h"
39 #include "util/u_surface.h"
40 #include "util/u_transfer.h"
41 #include "util/u_transfer_helper.h"
42 #include "util/u_gen_mipmap.h"
44 #include "pan_context.h"
45 #include "pan_screen.h"
46 #include "pan_resource.h"
48 #include "pan_tiling.h"
/* Imports a resource from a winsys handle (dma-buf FD): wraps the imported
 * BO in a panfrost_resource and records the caller-provided stride for
 * slice 0.  Only WINSYS_HANDLE_TYPE_FD is accepted (asserted below).
 *
 * NOTE(review): this block appears to have lines missing from the
 * extraction (e.g. prsc is used before any visible initialization, and no
 * return statement is visible) — verify against the upstream file before
 * editing. */
50 static struct pipe_resource
*
51 panfrost_resource_from_handle(struct pipe_screen
*pscreen
,
52 const struct pipe_resource
*templat
,
53 struct winsys_handle
*whandle
,
56 struct panfrost_screen
*screen
= pan_screen(pscreen
);
57 struct panfrost_resource
*rsc
;
58 struct pipe_resource
*prsc
;
/* Only dma-buf FD handles are supported for import. */
60 assert(whandle
->type
== WINSYS_HANDLE_TYPE_FD
);
62 rsc
= rzalloc(pscreen
, struct panfrost_resource
);
70 pipe_reference_init(&prsc
->reference
, 1);
71 prsc
->screen
= pscreen
;
/* Import the dma-buf into a driver BO and record the external stride;
 * imported contents are treated as already initialized. */
73 rsc
->bo
= panfrost_drm_import_bo(screen
, whandle
->handle
);
74 rsc
->slices
[0].stride
= whandle
->stride
;
75 rsc
->slices
[0].initialized
= true;
/* Register the import with the renderonly layer (NULL ro-handle out). */
79 renderonly_create_gpu_import_for_resource(prsc
, screen
->ro
, NULL
);
80 /* failure is expected in some cases.. */
/* Exports a resource as a winsys handle.  Three handle types are handled:
 *  - SHARED: empty branch as visible here (presumably unsupported — TODO
 *    confirm against upstream);
 *  - KMS: tries renderonly first, else returns the GEM handle + stride;
 *  - FD: converts the scanout KMS handle to a PRIME fd via
 *    DRM_IOCTL_PRIME_HANDLE_TO_FD, or exports the BO directly.
 * The modifier is always reported as DRM_FORMAT_MOD_INVALID (no modifier
 * negotiation here).
 *
 * NOTE(review): several lines (return statements, closing braces, the
 * non-scanout FD path's use of `fd`) are missing from this extraction —
 * verify against upstream before editing. */
87 panfrost_resource_get_handle(struct pipe_screen
*pscreen
,
88 struct pipe_context
*ctx
,
89 struct pipe_resource
*pt
,
90 struct winsys_handle
*handle
,
93 struct panfrost_screen
*screen
= pan_screen(pscreen
);
94 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) pt
;
95 struct renderonly_scanout
*scanout
= rsrc
->scanout
;
/* No modifier support: always advertise INVALID. */
97 handle
->modifier
= DRM_FORMAT_MOD_INVALID
;
99 if (handle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
101 } else if (handle
->type
== WINSYS_HANDLE_TYPE_KMS
) {
/* Prefer the renderonly (display-device) handle when one exists. */
102 if (renderonly_get_handle(scanout
, handle
))
105 handle
->handle
= rsrc
->bo
->gem_handle
;
106 handle
->stride
= rsrc
->slices
[0].stride
;
108 } else if (handle
->type
== WINSYS_HANDLE_TYPE_FD
) {
/* Scanout path: turn the KMS-side handle into a dma-buf fd. */
110 struct drm_prime_handle args
= {
111 .handle
= scanout
->handle
,
112 .flags
= DRM_CLOEXEC
,
115 int ret
= drmIoctl(screen
->ro
->kms_fd
, DRM_IOCTL_PRIME_HANDLE_TO_FD
, &args
);
119 handle
->stride
= scanout
->stride
;
120 handle
->handle
= args
.fd
;
/* Non-scanout path: export the render-node BO directly. */
124 int fd
= panfrost_drm_export_bo(screen
, rsrc
->bo
);
130 handle
->stride
= rsrc
->slices
[0].stride
;
/* pipe_context::flush_resource hook — intentionally a no-op stub for now
 * (see the TODO left in the body). */
139 panfrost_flush_resource(struct pipe_context
*pctx
, struct pipe_resource
*prsc
)
141 //DBG("TODO %s\n", __func__);
/* Creates a pipe_surface view of a resource from a template.  For textures
 * the width/height are minified to the requested mip level; for buffers
 * the element range from the template is used and width is expressed in
 * elements.
 *
 * NOTE(review): some lines (the `else` introducing the buffer branch, the
 * return, closing braces) are missing from this extraction — verify
 * against upstream before editing. */
144 static struct pipe_surface
*
145 panfrost_create_surface(struct pipe_context
*pipe
,
146 struct pipe_resource
*pt
,
147 const struct pipe_surface
*surf_tmpl
)
149 struct pipe_surface
*ps
= NULL
;
151 ps
= rzalloc(pipe
, struct pipe_surface
);
/* The surface holds its own reference on the underlying resource. */
154 pipe_reference_init(&ps
->reference
, 1);
155 pipe_resource_reference(&ps
->texture
, pt
);
157 ps
->format
= surf_tmpl
->format
;
159 if (pt
->target
!= PIPE_BUFFER
) {
/* Texture view: dimensions follow the selected mip level. */
160 assert(surf_tmpl
->u
.tex
.level
<= pt
->last_level
);
161 ps
->width
= u_minify(pt
->width0
, surf_tmpl
->u
.tex
.level
);
162 ps
->height
= u_minify(pt
->height0
, surf_tmpl
->u
.tex
.level
);
163 ps
->u
.tex
.level
= surf_tmpl
->u
.tex
.level
;
164 ps
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
165 ps
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
167 /* setting width as number of elements should get us correct renderbuffer width */
168 ps
->width
= surf_tmpl
->u
.buf
.last_element
- surf_tmpl
->u
.buf
.first_element
+ 1;
169 ps
->height
= pt
->height0
;
170 ps
->u
.buf
.first_element
= surf_tmpl
->u
.buf
.first_element
;
171 ps
->u
.buf
.last_element
= surf_tmpl
->u
.buf
.last_element
;
/* Sanity: element range must be well-formed and inside the buffer. */
172 assert(ps
->u
.buf
.first_element
<= ps
->u
.buf
.last_element
);
173 assert(ps
->u
.buf
.last_element
< ps
->width
);
/* Destroys a surface view: drops its reference on the backing texture.
 * NOTE(review): the free of the surface object itself is not visible in
 * this extraction — verify against upstream. */
181 panfrost_surface_destroy(struct pipe_context
*pipe
,
182 struct pipe_surface
*surf
)
184 assert(surf
->texture
);
185 pipe_resource_reference(&surf
->texture
, NULL
);
/* Creates a scanout-capable resource: allocates the buffer on the display
 * (KMS) device via renderonly, then re-imports it into this screen through
 * resource_from_handle so the GPU can render to it.  The imported resource
 * remembers its scanout object and is recorded as the screen's display
 * target.
 *
 * NOTE(review): error-handling lines and the return are missing from this
 * extraction — verify against upstream before editing. */
189 static struct pipe_resource
*
190 panfrost_create_scanout_res(struct pipe_screen
*screen
,
191 const struct pipe_resource
*template)
193 struct panfrost_screen
*pscreen
= pan_screen(screen
);
194 struct pipe_resource scanout_templat
= *template;
195 struct renderonly_scanout
*scanout
;
196 struct winsys_handle handle
;
197 struct pipe_resource
*res
;
/* Allocate the displayable buffer on the KMS device. */
199 scanout
= renderonly_scanout_for_resource(&scanout_templat
,
200 pscreen
->ro
, &handle
);
204 assert(handle
.type
== WINSYS_HANDLE_TYPE_FD
);
205 /* TODO: handle modifiers? */
/* Re-import the dma-buf into this (render-only) screen. */
206 res
= screen
->resource_from_handle(screen
, template, &handle
,
207 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE
);
/* The import dup'd the fd; close our copy. */
208 close(handle
.handle
);
212 struct panfrost_resource
*pres
= pan_resource(res
);
214 pres
->scanout
= scanout
;
215 pscreen
->display_target
= pres
;
220 /* Computes sizes for checksumming, which is 8 bytes per 16x16 tile */
222 #define CHECKSUM_TILE_WIDTH 16
223 #define CHECKSUM_TILE_HEIGHT 16
224 #define CHECKSUM_BYTES_PER_TILE 8
/* Fills in slice->checksum_stride (bytes per row of checksum tiles) and
 * returns the total checksum region size in bytes for a width x height
 * slice, rounding dimensions up to whole 16x16 tiles.
 * NOTE(review): the width/height parameter lines are missing from this
 * extraction — verify the signature against upstream. */
227 panfrost_compute_checksum_sizes(
228 struct panfrost_slice
*slice
,
232 unsigned aligned_width
= ALIGN(width
, CHECKSUM_TILE_WIDTH
);
233 unsigned aligned_height
= ALIGN(height
, CHECKSUM_TILE_HEIGHT
);
235 unsigned tile_count_x
= aligned_width
/ CHECKSUM_TILE_WIDTH
;
236 unsigned tile_count_y
= aligned_height
/ CHECKSUM_TILE_HEIGHT
;
238 slice
->checksum_stride
= tile_count_x
* CHECKSUM_BYTES_PER_TILE
;
240 return slice
->checksum_stride
* tile_count_y
;
243 /* Setup the mip tree given a particular layout, possibly with checksumming */
/* Walks every mip level of pres, computing per-slice offset/stride (and,
 * when enabled, AFBC header and checksum region sizes), then derives
 * cubemap_stride and the total BO size (*bo_size).  Renderable or tiled
 * resources get 16-pixel-aligned dimensions; linear strides are rounded
 * to 64-byte cache lines where a 2D stride can be expressed.
 *
 * NOTE(review): several lines are missing from this extraction (the
 * declaration of `offset`, the conditions guarding the 16-alignment and
 * AFBC branches, some closing braces) — verify against upstream before
 * editing. */
246 panfrost_setup_slices(struct panfrost_resource
*pres
, size_t *bo_size
)
248 struct pipe_resource
*res
= &pres
->base
;
249 unsigned width
= res
->width0
;
250 unsigned height
= res
->height0
;
251 unsigned depth
= res
->depth0
;
252 unsigned bytes_per_pixel
= util_format_get_blocksize(res
->format
);
256 /* Tiled operates blockwise; linear is packed. Also, anything
257 * we render to has to be tile-aligned. Maybe not strictly
258 * necessary, but we're not *that* pressed for memory and it
259 * makes code a lot simpler */
261 bool renderable
= res
->bind
&
262 (PIPE_BIND_RENDER_TARGET
| PIPE_BIND_DEPTH_STENCIL
);
263 bool afbc
= pres
->layout
== PAN_AFBC
;
264 bool tiled
= pres
->layout
== PAN_TILED
;
265 bool should_align
= renderable
|| tiled
;
267 /* We don't know how to specify a 2D stride for 3D textures */
269 bool can_align_stride
=
270 res
->target
!= PIPE_TEXTURE_3D
;
272 should_align
&= can_align_stride
;
/* Size of one 2D layer of the last level, reported for 3D textures. */
275 unsigned size_2d
= 0;
277 for (unsigned l
= 0; l
<= res
->last_level
; ++l
) {
278 struct panfrost_slice
*slice
= &pres
->slices
[l
];
280 unsigned effective_width
= width
;
281 unsigned effective_height
= height
;
282 unsigned effective_depth
= depth
;
/* Tile-align renderable/tiled dimensions (guard presumably elided
 * by the extraction — see should_align above). */
285 effective_width
= ALIGN(effective_width
, 16);
286 effective_height
= ALIGN(effective_height
, 16);
288 /* We don't need to align depth */
291 slice
->offset
= offset
;
293 /* Compute the would-be stride */
294 unsigned stride
= bytes_per_pixel
* effective_width
;
296 /* ..but cache-line align it for performance */
297 if (can_align_stride
&& pres
->layout
== PAN_LINEAR
)
298 stride
= ALIGN(stride
, 64);
300 slice
->stride
= stride
;
302 unsigned slice_one_size
= slice
->stride
* effective_height
;
303 unsigned slice_full_size
= slice_one_size
* effective_depth
;
305 /* Report 2D size for 3D texturing */
308 size_2d
= slice_one_size
;
310 /* Compute AFBC sizes if necessary */
313 panfrost_afbc_header_size(width
, height
);
315 offset
+= slice
->header_size
;
318 offset
+= slice_full_size
;
320 /* Add a checksum region if necessary */
321 if (pres
->checksummed
) {
322 slice
->checksum_offset
= offset
;
324 unsigned size
= panfrost_compute_checksum_sizes(
325 slice
, width
, height
);
/* Next mip level is half-size in each dimension. */
330 width
= u_minify(width
, 1);
331 height
= u_minify(height
, 1);
332 depth
= u_minify(depth
, 1);
335 assert(res
->array_size
);
337 if (res
->target
!= PIPE_TEXTURE_3D
) {
338 /* Arrays and cubemaps have the entire miptree duplicated */
340 pres
->cubemap_stride
= ALIGN(offset
, 64);
341 *bo_size
= ALIGN(pres
->cubemap_stride
* res
->array_size
, 4096);
343 /* 3D strides across the 2D layers */
344 assert(res
->array_size
== 1);
346 pres
->cubemap_stride
= size_2d
;
347 *bo_size
= ALIGN(offset
, 4096);
/* Picks a memory layout for a freshly-created resource (tiled for sampled
 * 2D textures that aren't streaming or depth/stencil, otherwise linear),
 * decides whether to add a checksum region, sizes the slices, and
 * allocates the backing BO. */
352 panfrost_resource_create_bo(struct panfrost_screen
*screen
, struct panfrost_resource
*pres
)
354 struct pipe_resource
*res
= &pres
->base
;
356 /* Based on the usage, figure out what storing will be used. There are
359 * Linear: the basic format, bad for memory bandwidth, bad for cache
360 * use. Zero-copy, though. Renderable.
362 * Tiled: Not compressed, but cache-optimized. Expensive to write into
363 * (due to software tiling), but cheap to sample from. Ideal for most
366 * AFBC: Compressed and renderable (so always desirable for non-scanout
367 * rendertargets). Cheap to sample from. The format is black box, so we
368 * can't read/write from software.
371 /* Tiling textures is almost always faster, unless we only use it once */
373 bool is_texture
= (res
->bind
& PIPE_BIND_SAMPLER_VIEW
);
374 bool is_2d
= res
->depth0
== 1 && res
->array_size
== 1;
/* NOTE(review): despite its name, is_streaming is true for NON-stream
 * usage (!= PIPE_USAGE_STREAM) — i.e. "not written once and discarded".
 * Confirm naming intent against upstream. */
375 bool is_streaming
= (res
->usage
!= PIPE_USAGE_STREAM
);
377 bool should_tile
= is_streaming
&& is_texture
&& is_2d
;
379 /* Depth/stencil can't be tiled, only linear or AFBC */
380 should_tile
&= !(res
->bind
& PIPE_BIND_DEPTH_STENCIL
);
382 /* FBOs we would like to checksum, if at all possible */
383 bool can_checksum
= !(res
->bind
& (PIPE_BIND_SCANOUT
| PIPE_BIND_SHARED
));
384 bool should_checksum
= res
->bind
& PIPE_BIND_RENDER_TARGET
;
386 pres
->checksummed
= can_checksum
&& should_checksum
;
388 /* Set the layout appropriately */
389 pres
->layout
= should_tile
? PAN_TILED
: PAN_LINEAR
;
/* Size the slices, then allocate exactly that much GPU memory.
 * NOTE(review): the declaration of bo_size is missing from this
 * extraction. */
393 panfrost_setup_slices(pres
, &bo_size
);
394 pres
->bo
= panfrost_drm_create_bo(screen
, bo_size
, 0);
/* pipe_screen::resource_create hook.  Scanout/display/shared resources are
 * routed through the renderonly path (panfrost_create_scanout_res);
 * everything else is allocated locally: zero-init the panfrost_resource,
 * copy the template, init refcount and valid_buffer_range, then create
 * the backing BO.
 *
 * NOTE(review): lines are missing from this extraction (the PIPE_BUFFER
 * case, the default-case return, the bind-flag test introducing the
 * scanout branch) — verify against upstream before editing. */
397 static struct pipe_resource
*
398 panfrost_resource_create(struct pipe_screen
*screen
,
399 const struct pipe_resource
*template)
401 /* Make sure we're familiar */
402 switch (template->target
) {
404 case PIPE_TEXTURE_1D
:
405 case PIPE_TEXTURE_2D
:
406 case PIPE_TEXTURE_3D
:
407 case PIPE_TEXTURE_CUBE
:
408 case PIPE_TEXTURE_RECT
:
409 case PIPE_TEXTURE_2D_ARRAY
:
412 DBG("Unknown texture target %d\n", template->target
);
/* Display-bound resources take the scanout (renderonly) path. */
417 (PIPE_BIND_DISPLAY_TARGET
| PIPE_BIND_SCANOUT
| PIPE_BIND_SHARED
))
418 return panfrost_create_scanout_res(screen
, template);
420 struct panfrost_resource
*so
= rzalloc(screen
, struct panfrost_resource
);
421 struct panfrost_screen
*pscreen
= (struct panfrost_screen
*) screen
;
423 so
->base
= *template;
424 so
->base
.screen
= screen
;
426 pipe_reference_init(&so
->base
.reference
, 1);
/* Tracks which byte range of a buffer has been written (used to skip
 * flushes for writes to uninitialized regions). */
428 util_range_init(&so
->valid_buffer_range
);
430 panfrost_resource_create_bo(pscreen
, so
);
431 return (struct pipe_resource
*)so
;
/* Takes an extra reference on a BO (pipe_reference with NULL old side
 * only increments). */
435 panfrost_bo_reference(struct panfrost_bo
*bo
)
437 pipe_reference(NULL
, &bo
->reference
);
/* Drops a reference on a BO and releases it to the kernel when the count
 * reaches zero (pipe_reference returns true on the last unref). */
441 panfrost_bo_unreference(struct pipe_screen
*screen
, struct panfrost_bo
*bo
)
443 /* When the reference count goes to zero, we need to cleanup */
445 if (pipe_reference(&bo
->reference
, NULL
))
446 panfrost_drm_release_bo(pan_screen(screen
), bo
);
/* pipe_screen::resource_destroy (via u_transfer_helper): tears down the
 * scanout object, unrefs the BO, and destroys the valid-range tracker.
 * NOTE(review): guards on rsrc->scanout / rsrc->bo and the final free of
 * the resource struct are not visible in this extraction — verify against
 * upstream. */
450 panfrost_resource_destroy(struct pipe_screen
*screen
,
451 struct pipe_resource
*pt
)
453 struct panfrost_screen
*pscreen
= pan_screen(screen
);
454 struct panfrost_resource
*rsrc
= (struct panfrost_resource
*) pt
;
457 renderonly_scanout_destroy(rsrc
->scanout
, pscreen
->ro
);
460 panfrost_bo_unreference(screen
, rsrc
->bo
);
462 util_range_destroy(&rsrc
->valid_buffer_range
);
/* pipe_context::transfer_map (via u_transfer_helper).  Allocates a
 * panfrost_gtransfer, flushes if the resource is bound as a color buffer
 * and is being read, then either:
 *  - non-linear layouts: allocates a staging buffer sized by the box and
 *    (for tiled, already-initialized slices) de-tiles into it — AFBC reads
 *    are unimplemented; or
 *  - linear: returns a pointer straight into the BO at the slice/box
 *    offset, marking the slice initialized on direct writes.
 *
 * NOTE(review): several lines are missing from this extraction (the
 * `level` parameter, the DISCARD/UNSYNCHRONIZED handling bodies, the
 * load_tiled_image argument list, the final `return bo->cpu + ...`
 * expression head) — verify against upstream before editing. */
467 panfrost_transfer_map(struct pipe_context
*pctx
,
468 struct pipe_resource
*resource
,
470 unsigned usage
, /* a combination of PIPE_TRANSFER_x */
471 const struct pipe_box
*box
,
472 struct pipe_transfer
**out_transfer
)
474 int bytes_per_pixel
= util_format_get_blocksize(resource
->format
);
475 struct panfrost_resource
*rsrc
= pan_resource(resource
);
476 struct panfrost_bo
*bo
= rsrc
->bo
;
478 struct panfrost_gtransfer
*transfer
= rzalloc(pctx
, struct panfrost_gtransfer
);
479 transfer
->base
.level
= level
;
480 transfer
->base
.usage
= usage
;
481 transfer
->base
.box
= *box
;
483 pipe_resource_reference(&transfer
->base
.resource
, resource
);
485 *out_transfer
= &transfer
->base
;
487 /* Check if we're bound for rendering and this is a read pixels. If so,
488 * we need to flush */
490 struct panfrost_context
*ctx
= pan_context(pctx
);
491 struct pipe_framebuffer_state
*fb
= &ctx
->pipe_framebuffer
;
493 bool is_bound
= false;
495 for (unsigned c
= 0; c
< fb
->nr_cbufs
; ++c
) {
496 is_bound
|= fb
->cbufs
[c
]->texture
== resource
;
/* Reading pixels from a bound render target: flush pending GPU work
 * first so the CPU sees up-to-date contents. */
499 if (is_bound
&& (usage
& PIPE_TRANSFER_READ
)) {
501 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
504 /* TODO: Respect usage flags */
506 if (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
507 /* TODO: reallocate */
508 //printf("debug: Missed reallocate\n");
509 } else if ((usage
& PIPE_TRANSFER_WRITE
)
510 && resource
->target
== PIPE_BUFFER
511 && !util_ranges_intersect(&rsrc
->valid_buffer_range
, box
->x
, box
->x
+ box
->width
)) {
512 /* No flush for writes to uninitialized */
513 } else if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
514 if (usage
& PIPE_TRANSFER_WRITE
) {
515 /* STUB: flush reading */
516 //printf("debug: missed reading flush %d\n", resource->target);
517 } else if (usage
& PIPE_TRANSFER_READ
) {
518 /* STUB: flush writing */
519 //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
521 /* Why are you even mapping?! */
525 if (rsrc
->layout
!= PAN_LINEAR
) {
526 /* Non-linear resources need to be indirectly mapped */
528 if (usage
& PIPE_TRANSFER_MAP_DIRECTLY
)
/* Staging buffer tightly packed to the requested box. */
531 transfer
->base
.stride
= box
->width
* bytes_per_pixel
;
532 transfer
->base
.layer_stride
= transfer
->base
.stride
* box
->height
;
533 transfer
->map
= rzalloc_size(transfer
, transfer
->base
.layer_stride
* box
->depth
);
534 assert(box
->depth
== 1);
/* Only de-tile if there is valid data to read back. */
536 if ((usage
& PIPE_TRANSFER_READ
) && rsrc
->slices
[level
].initialized
) {
537 if (rsrc
->layout
== PAN_AFBC
) {
538 DBG("Unimplemented: reads from AFBC");
539 } else if (rsrc
->layout
== PAN_TILED
) {
540 panfrost_load_tiled_image(
542 bo
->cpu
+ rsrc
->slices
[level
].offset
,
544 transfer
->base
.stride
,
545 rsrc
->slices
[level
].stride
,
546 util_format_get_blocksize(resource
->format
));
550 return transfer
->map
;
/* Linear path: map the BO directly using real slice strides. */
552 transfer
->base
.stride
= rsrc
->slices
[level
].stride
;
553 transfer
->base
.layer_stride
= rsrc
->cubemap_stride
;
555 /* By mapping direct-write, we're implicitly already
556 * initialized (maybe), so be conservative */
558 if ((usage
& PIPE_TRANSFER_WRITE
) && (usage
& PIPE_TRANSFER_MAP_DIRECTLY
))
559 rsrc
->slices
[level
].initialized
= true;
562 + rsrc
->slices
[level
].offset
563 + transfer
->base
.box
.z
* rsrc
->cubemap_stride
564 + transfer
->base
.box
.y
* rsrc
->slices
[level
].stride
565 + transfer
->base
.box
.x
* bytes_per_pixel
;
/* pipe_context::transfer_unmap (via u_transfer_helper).  On a write
 * transfer, marks the slice initialized and writes the staging buffer
 * back (re-tiling for PAN_TILED; AFBC writes unimplemented), then records
 * the written buffer range, drops the resource reference, and frees the
 * ralloc'd transfer.
 *
 * NOTE(review): the `if (trans->map)` guard, the store_tiled_image
 * argument list, and the util_range_add start argument are missing from
 * this extraction — verify against upstream before editing. */
570 panfrost_transfer_unmap(struct pipe_context
*pctx
,
571 struct pipe_transfer
*transfer
)
573 /* Gallium expects writeback here, so we tile */
575 struct panfrost_gtransfer
*trans
= pan_transfer(transfer
);
576 struct panfrost_resource
*prsrc
= (struct panfrost_resource
*) transfer
->resource
;
579 struct panfrost_bo
*bo
= prsrc
->bo
;
581 if (transfer
->usage
& PIPE_TRANSFER_WRITE
) {
582 unsigned level
= transfer
->level
;
583 prsrc
->slices
[level
].initialized
= true;
585 if (prsrc
->layout
== PAN_AFBC
) {
586 DBG("Unimplemented: writes to AFBC\n");
587 } else if (prsrc
->layout
== PAN_TILED
) {
588 assert(transfer
->box
.depth
== 1);
/* Re-tile the staging data back into the BO. */
590 panfrost_store_tiled_image(
591 bo
->cpu
+ prsrc
->slices
[level
].offset
,
594 prsrc
->slices
[level
].stride
,
596 util_format_get_blocksize(prsrc
->base
.format
));
/* Remember which byte range now holds valid data. */
602 util_range_add(&prsrc
->valid_buffer_range
,
604 transfer
->box
.x
+ transfer
->box
.width
);
606 /* Dereference the resource */
607 pipe_resource_reference(&transfer
->resource
, NULL
);
609 /* Transfer itself is RALLOCed at the moment */
610 ralloc_free(transfer
);
/* pipe_context::transfer_flush_region (via u_transfer_helper): for buffer
 * resources, records the flushed sub-box (relative to the transfer's box)
 * into the valid-range tracker. */
614 panfrost_transfer_flush_region(struct pipe_context
*pctx
,
615 struct pipe_transfer
*transfer
,
616 const struct pipe_box
*box
)
618 struct panfrost_resource
*rsc
= pan_resource(transfer
->resource
);
620 if (transfer
->resource
->target
== PIPE_BUFFER
) {
621 util_range_add(&rsc
->valid_buffer_range
,
622 transfer
->box
.x
+ box
->x
,
623 transfer
->box
.x
+ box
->x
+ box
->width
);
/* pb_slabs allocation callback: creates one slab of panfrost_memory,
 * carves it into fixed-size entries threaded onto the slab free list,
 * then backs it with kernel memory (mapped, same_va).
 * NOTE(review): the return statement is missing from this extraction —
 * verify against upstream. */
627 static struct pb_slab
*
628 panfrost_slab_alloc(void *priv
, unsigned heap
, unsigned entry_size
, unsigned group_index
)
630 struct panfrost_screen
*screen
= (struct panfrost_screen
*) priv
;
631 struct panfrost_memory
*mem
= rzalloc(screen
, struct panfrost_memory
);
/* Slab size is fixed at 2^(MAX_SLAB_ENTRY_SIZE + 1) bytes. */
633 size_t slab_size
= (1 << (MAX_SLAB_ENTRY_SIZE
+ 1));
635 mem
->slab
.num_entries
= slab_size
/ entry_size
;
636 mem
->slab
.num_free
= mem
->slab
.num_entries
;
/* Pre-populate the free list with every entry in the slab. */
638 LIST_INITHEAD(&mem
->slab
.free
);
639 for (unsigned i
= 0; i
< mem
->slab
.num_entries
; ++i
) {
640 /* Create a slab entry */
641 struct panfrost_memory_entry
*entry
= rzalloc(mem
, struct panfrost_memory_entry
);
642 entry
->offset
= entry_size
* i
;
644 entry
->base
.slab
= &mem
->slab
;
645 entry
->base
.group_index
= group_index
;
647 LIST_ADDTAIL(&entry
->base
.head
, &mem
->slab
.free
);
650 /* Actually allocate the memory from kernel-space. Mapped, same_va, no
653 panfrost_drm_allocate_slab(screen
, mem
, slab_size
/ 4096, true, 0, 0, 0);
/* pb_slabs reclaim predicate: an entry may be reused once it has been
 * marked freed. */
659 panfrost_slab_can_reclaim(void *priv
, struct pb_slab_entry
*entry
)
661 struct panfrost_memory_entry
*p_entry
= (struct panfrost_memory_entry
*) entry
;
662 return p_entry
->freed
;
/* pb_slabs free callback: returns the slab's kernel memory.
 * NOTE(review): freeing of the panfrost_memory struct itself is not
 * visible in this extraction — verify against upstream. */
666 panfrost_slab_free(void *priv
, struct pb_slab
*slab
)
668 struct panfrost_memory
*mem
= (struct panfrost_memory
*) slab
;
669 struct panfrost_screen
*screen
= (struct panfrost_screen
*) priv
;
671 panfrost_drm_free_slab(screen
, mem
);
/* pipe_context::invalidate_resource hook — intentionally a no-op stub for
 * now (see the TODO left in the body). */
676 panfrost_invalidate_resource(struct pipe_context
*pctx
, struct pipe_resource
*prsc
)
678 //DBG("TODO %s\n", __func__);
/* u_transfer_helper callback: panfrost stores data in the resource's
 * advertised format, so the internal format is just prsrc->format. */
681 static enum pipe_format
682 panfrost_resource_get_internal_format(struct pipe_resource
*prsrc
)
684 return prsrc
->format
;
/* pipe_context::generate_mipmap hook.  Invalidates the destination levels
 * (so the wallpaper/blit logic doesn't try to preserve them), flushes any
 * pending draws touching the source, delegates the actual downsampling to
 * util_gen_mipmap, and flushes again on success.
 *
 * NOTE(review): lines are missing from this extraction (the base_level /
 * last_level / last_layer parameters, the has_draws guard on the first
 * flush, some util_gen_mipmap arguments, the return) — verify against
 * upstream before editing. */
688 panfrost_generate_mipmap(
689 struct pipe_context
*pctx
,
690 struct pipe_resource
*prsrc
,
691 enum pipe_format format
,
694 unsigned first_layer
,
697 struct panfrost_context
*ctx
= pan_context(pctx
);
698 struct panfrost_resource
*rsrc
= pan_resource(prsrc
);
700 /* Generating a mipmap invalidates the written levels, so make that
701 * explicit so we don't try to wallpaper them back and end up with
702 * u_blitter recursion */
705 for (unsigned l
= base_level
+ 1; l
<= last_level
; ++l
)
706 rsrc
->slices
[l
].initialized
= false;
708 /* Beyond that, we just delegate the hard stuff. We're careful to
709 * include flushes on both ends to make sure the data is really valid.
710 * We could be doing a lot better perf-wise, especially once we have
711 * reorder-type optimizations in place. But for now prioritize
714 struct panfrost_job
*job
= panfrost_get_job_for_fbo(ctx
);
715 bool has_draws
= job
->last_job
.gpu
;
718 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
720 /* We've flushed the original buffer if needed, now trigger a blit */
722 bool blit_res
= util_gen_mipmap(
724 base_level
, last_level
,
725 first_layer
, last_layer
,
726 PIPE_TEX_FILTER_LINEAR
);
728 /* If the blit was successful, flush once more. If it wasn't, well, let
729 * the state tracker deal with it. */
732 panfrost_flush(pctx
, NULL
, PIPE_FLUSH_END_OF_FRAME
);
737 /* Computes the address to a texture at a particular slice */
/* Returns the GPU virtual address of (level, face) within rsrc's BO:
 * base GPU address + the level's slice offset + face * cubemap_stride. */
740 panfrost_get_texture_address(
741 struct panfrost_resource
*rsrc
,
742 unsigned level
, unsigned face
)
744 unsigned level_offset
= rsrc
->slices
[level
].offset
;
745 unsigned face_offset
= face
* rsrc
->cubemap_stride
;
747 return rsrc
->bo
->gpu
+ level_offset
+ face_offset
;
/* u_transfer_helper callback: attaches a separate stencil resource to a
 * depth resource (for Z32F_S8-style split formats). */
751 panfrost_resource_set_stencil(struct pipe_resource
*prsrc
,
752 struct pipe_resource
*stencil
)
754 pan_resource(prsrc
)->separate_stencil
= pan_resource(stencil
);
/* u_transfer_helper callback: returns the separate stencil resource
 * previously attached by panfrost_resource_set_stencil. */
757 static struct pipe_resource
*
758 panfrost_resource_get_stencil(struct pipe_resource
*prsrc
)
760 return &pan_resource(prsrc
)->separate_stencil
->base
;
/* Vtable handed to u_transfer_helper_create(): lets the helper wrap our
 * create/destroy/map/unmap/flush hooks with format-conversion and
 * separate-stencil handling. */
763 static const struct u_transfer_vtbl transfer_vtbl
= {
764 .resource_create
= panfrost_resource_create
,
765 .resource_destroy
= panfrost_resource_destroy
,
766 .transfer_map
= panfrost_transfer_map
,
767 .transfer_unmap
= panfrost_transfer_unmap
,
768 .transfer_flush_region
= panfrost_transfer_flush_region
,
769 .get_internal_format
= panfrost_resource_get_internal_format
,
770 .set_stencil
= panfrost_resource_set_stencil
,
771 .get_stencil
= panfrost_resource_get_stencil
,
/* Wires up the screen-level resource entrypoints (create/destroy routed
 * through u_transfer_helper, import/export handled directly) and
 * initializes the pb_slabs suballocator with 3 heaps.
 * NOTE(review): the u_transfer_helper_create() flag arguments and several
 * pb_slabs_init() arguments are missing from this extraction — verify
 * against upstream before editing. */
775 panfrost_resource_screen_init(struct panfrost_screen
*pscreen
)
777 //pscreen->base.resource_create_with_modifiers =
778 // panfrost_resource_create_with_modifiers;
779 pscreen
->base
.resource_create
= u_transfer_helper_resource_create
;
780 pscreen
->base
.resource_destroy
= u_transfer_helper_resource_destroy
;
781 pscreen
->base
.resource_from_handle
= panfrost_resource_from_handle
;
782 pscreen
->base
.resource_get_handle
= panfrost_resource_get_handle
;
783 pscreen
->base
.transfer_helper
= u_transfer_helper_create(&transfer_vtbl
,
787 pb_slabs_init(&pscreen
->slabs
,
791 3, /* Number of heaps */
795 panfrost_slab_can_reclaim
,
/* Screen teardown counterpart: destroys the pb_slabs suballocator. */
801 panfrost_resource_screen_deinit(struct panfrost_screen
*pscreen
)
803 pb_slabs_deinit(&pscreen
->slabs
);
/* Wires up the context-level resource entrypoints.  Map/unmap/flush-region
 * go through u_transfer_helper; surfaces, blits, mipmap generation and
 * resource flush/invalidate use the panfrost implementations above.
 * NOTE(review): transfer_flush_region and buffer_subdata are assigned
 * twice (near the top and again at the bottom) — redundant but harmless;
 * consider deduplicating upstream. */
807 panfrost_resource_context_init(struct pipe_context
*pctx
)
809 pctx
->transfer_map
= u_transfer_helper_transfer_map
;
810 pctx
->transfer_flush_region
= u_transfer_helper_transfer_flush_region
;
811 pctx
->transfer_unmap
= u_transfer_helper_transfer_unmap
;
812 pctx
->buffer_subdata
= u_default_buffer_subdata
;
813 pctx
->create_surface
= panfrost_create_surface
;
814 pctx
->surface_destroy
= panfrost_surface_destroy
;
815 pctx
->resource_copy_region
= util_resource_copy_region
;
816 pctx
->blit
= panfrost_blit
;
817 pctx
->generate_mipmap
= panfrost_generate_mipmap
;
818 pctx
->flush_resource
= panfrost_flush_resource
;
819 pctx
->invalidate_resource
= panfrost_invalidate_resource
;
820 pctx
->transfer_flush_region
= u_transfer_helper_transfer_flush_region
;
821 pctx
->buffer_subdata
= u_default_buffer_subdata
;
822 pctx
->texture_subdata
= u_default_texture_subdata
;