1 /**************************************************************************
3 * Copyright 2018-2019 Alyssa Rosenzweig
4 * Copyright 2018-2019 Collabora, Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
#ifndef PAN_SCREEN_H
#define PAN_SCREEN_H

#include "pipe/p_screen.h"
#include "pipe/p_defines.h"
#include "renderonly/renderonly.h"
#include "util/u_dynarray.h"
#include "util/bitset.h"

#include <panfrost-misc.h>
#include "pan_allocate.h"
/* Opaque forward declarations; the full definitions live in their own
 * headers and only pointers to these types appear below. */
struct panfrost_context;
struct panfrost_resource;
struct panfrost_screen;
#define PAN_MAX_CONST_BUFFERS 16

/* Flags for allocated memory */

/* This memory region is executable */
#define PAN_ALLOCATE_EXECUTE (1 << 0)

/* This memory region should be lazily allocated and grow-on-page-fault. Must
 * be used in conjunction with INVISIBLE */
#define PAN_ALLOCATE_GROWABLE (1 << 1)

/* This memory region should not be mapped to the CPU */
#define PAN_ALLOCATE_INVISIBLE (1 << 2)

/* This memory region will be used for varyings and needs to have the cache
 * bits twiddled accordingly */
#define PAN_ALLOCATE_COHERENT_LOCAL (1 << 3)

/* This region may not be used immediately and will not mmap on allocate
 * (semantically distinct from INVISIBLE, which can never be mmaped) */
#define PAN_ALLOCATE_DELAY_MMAP (1 << 4)

/* Transient slab size. This is a balance between fragmentation against cache
 * locality and ease of bookkeeping */

#define TRANSIENT_SLAB_PAGES (32) /* 128kb */
#define TRANSIENT_SLAB_SIZE (4096 * TRANSIENT_SLAB_PAGES)

/* Maximum number of transient slabs so we don't need dynamic arrays. Most
 * interesting Mali boards are 4GB RAM max, so if the entire RAM was filled
 * with transient slabs, you could never exceed (4GB / TRANSIENT_SLAB_SIZE)
 * allocations anyway. By capping, we can use a fixed-size bitset for tracking
 * free slabs, eliminating quite a bit of complexity. We can pack the free
 * state of 8 slabs into a single byte, so for 128kb transient slabs the bitset
 * occupies a cheap 4kb of memory */

#define MAX_TRANSIENT_SLABS (1024*1024 / TRANSIENT_SLAB_PAGES)
84 struct panfrost_screen
{
85 struct pipe_screen base
;
89 struct renderonly
*ro
;
91 /* Transient memory management is based on borrowing fixed-size slabs
92 * off the screen (loaning them out to the batch). Dynamic array
93 * container of panfrost_bo */
95 struct util_dynarray transient_bo
;
97 /* Set of free transient BOs */
98 BITSET_DECLARE(free_transient
, MAX_TRANSIENT_SLABS
);
100 /* While we're busy building up the job for frame N, the GPU is
101 * still busy executing frame N-1. So hold a reference to
103 int last_fragment_flushed
;
104 struct panfrost_job
*last_job
;
/* Downcast a gallium pipe_screen to the panfrost subclass. Valid because
 * panfrost_screen embeds pipe_screen as its first member (`base`). */
static inline struct panfrost_screen *
pan_screen(struct pipe_screen *p)
{
        return (struct panfrost_screen *)p;
}
113 /* Get a transient BO off the screen given a
114 * particular index */
116 static inline struct panfrost_bo
*
117 pan_bo_for_index(struct panfrost_screen
*screen
, unsigned index
)
119 return *(util_dynarray_element(&screen
->transient_bo
,
120 struct panfrost_bo
*, index
));
/* Prototypes for the DRM winsys layer (implemented in pan_drm.c).
 * NOTE(review): the return types, and the parameters past each visible
 * trailing comma, were reconstructed from a mangled source — verify every
 * signature against the corresponding definition before relying on it. */

/* Slab allocation on top of the kernel BO interface */
void
panfrost_drm_allocate_slab(struct panfrost_screen *screen,
                           struct panfrost_memory *mem,
                           size_t pages,
                           bool same_va,
                           int extra_flags,
                           int commit_count,
                           int extent);
void
panfrost_drm_free_slab(struct panfrost_screen *screen,
                       struct panfrost_memory *mem);

/* BO lifecycle: create/mmap/release, plus dma-buf fd import/export */
struct panfrost_bo *
panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
                       uint32_t flags);
void
panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo);
void
panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo,
                        bool cacheable);
struct panfrost_bo *
panfrost_drm_import_bo(struct panfrost_screen *screen, int fd);
int
panfrost_drm_export_bo(struct panfrost_screen *screen,
                       const struct panfrost_bo *bo);

/* Job submission and synchronization */
int
panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws,
                              bool is_scanout);
void
panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
                                  struct pipe_fence_handle **fence);
unsigned
panfrost_drm_query_gpu_version(struct panfrost_screen *screen);
int
panfrost_drm_init_context(struct panfrost_context *ctx);
void
panfrost_drm_fence_reference(struct pipe_screen *screen,
                             struct pipe_fence_handle **ptr,
                             struct pipe_fence_handle *fence);
bool
panfrost_drm_fence_finish(struct pipe_screen *pscreen,
                          struct pipe_context *ctx,
                          struct pipe_fence_handle *fence,
                          uint64_t timeout);

/* BO cache — presumably fetch recycles a previously released BO of matching
 * size/flags (returning NULL on miss), put hands a BO back to the cache, and
 * evict_all frees everything cached; confirm against pan_bo_cache.c. */
struct panfrost_bo *
panfrost_bo_cache_fetch(struct panfrost_screen *screen,
                        size_t size, uint32_t flags);
bool
panfrost_bo_cache_put(struct panfrost_screen *screen,
                      struct panfrost_bo *bo);
void
panfrost_bo_cache_evict_all(struct panfrost_screen *screen);
178 #endif /* PAN_SCREEN_H */