freedreno: Implement memory object create/destroy for GL_EXT_memory_object
[mesa.git] src/gallium/drivers/freedreno/freedreno_resource.h
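For context on the commit title: the memory-object entry points themselves would live in freedreno_resource.c, not in this header; the header only adds the struct fd_memory_object type and its cast helper (see below). The following is a minimal sketch of what the create/destroy screen hooks plausibly look like, wired through gallium's pipe_screen::memobj_create_from_handle / memobj_destroy hooks. The hook function names and the fd_screen_bo_from_handle() helper are assumptions based on freedreno conventions, not confirmed by this header.

/* Sketch only: screen hooks for GL_EXT_memory_object import.  Names of
 * these functions and the fd_screen_bo_from_handle() helper are assumed.
 */
static struct pipe_memory_object *
fd_memobj_create_from_handle(struct pipe_screen *pscreen,
                             struct winsys_handle *whandle,
                             bool dedicated)
{
   struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
   if (!memobj)
      return NULL;

   /* Import the external allocation as an fd_bo: */
   struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
   if (!bo) {
      free(memobj);
      return NULL;
   }

   memobj->b.dedicated = dedicated;
   memobj->bo = bo;

   return &memobj->b;
}

static void
fd_memobj_destroy(struct pipe_screen *pscreen,
                  struct pipe_memory_object *pmemobj)
{
   struct fd_memory_object *memobj = fd_memory_object(pmemobj);

   /* Drop the ref on the imported bo; the memory object owns it: */
   fd_bo_del(memobj->bo);
   free(pmemobj);
}

/* ...and registered from fd_resource_screen_init():
 *
 *    pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
 *    pscreen->memobj_destroy = fd_memobj_destroy;
 */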
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"
#include "util/simple_mtx.h"

#include "freedreno_batch.h"
#include "freedreno_util.h"
#include "freedreno/fdl/freedreno_layout.h"

enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

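/* A pipe_resource, plus the fd_bo providing its backing storage and the
 * layout and batch-tracking state the driver needs:
 */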
struct fd_resource {
   struct pipe_resource base;
   struct fd_bo *bo;
   enum pipe_format internal_format;
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   simple_mtx_t lock;

   /* Bitmask of in-flight batches which reference this resource.  Note
    * that batches don't hold references to resources (instead the
    * fd_ringbuffer holds refs to the underlying fd_bo), but if the
    * resource is destroyed we need to clean up the batches' weak
    * references to it.
    */
   uint32_t batch_mask;

   /* reference to batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   enum fd_dirty_3d_state dirty;

   /*
    * LRZ
    *
    * TODO lrz width/height/pitch should probably also move to
    * fdl_layout
    */
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width;      /* for lrz clear, does this differ from lrz_pitch? */
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   struct fd_bo *lrz;
};

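/* A chunk of external memory imported for GL_EXT_memory_object, wrapping
 * the fd_bo that resources created from it will share:
 */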
struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

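/* Does the resource have unflushed GPU work pending in a batch that would
 * conflict with the requested access (any pending write, or a CPU write
 * vs a pending GPU read)?
 */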
static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}

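/* Non-blocking check (via DRM_FREEDRENO_PREP_NOSYNC) whether the kernel
 * still considers the bo busy for the given op:
 */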
static inline bool
fd_resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | DRM_FREEDRENO_PREP_NOSYNC) != 0;
}

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() per
    * resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}

static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc =
         util_format_description(format);
   return util_format_has_depth(desc);
}

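/* Subclass of pipe_transfer, with extra state for transfers that bounce
 * through a temporary staging resource:
 */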
struct fd_transfer {
   struct pipe_transfer base;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->base.last_level);
   return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->base.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

/* This might be a5xx specific, but higher mipmap levels are always linear: */
static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
   struct fd_screen *screen = fd_screen(prsc->screen);
   debug_assert(!is_a3xx(screen));

   return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);

bool fd_render_condition_check(struct pipe_context *pctx);

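/* Does the batch already track this resource (ie. hold a weak reference
 * to it)?
 */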
static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc)
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}

#endif /* FREEDRENO_RESOURCE_H_ */