freedreno: Fix missing rsc->seqno updates
[mesa.git] / src / gallium / drivers / freedreno / freedreno_resource.h
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"
#include "util/simple_mtx.h"

#include "freedreno_batch.h"
#include "freedreno_util.h"
#include "freedreno/fdl/freedreno_layout.h"

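/* Tracked direction of recent depth comparisons, used to decide whether
 * the low-resolution Z (LRZ) buffer is still usable when the depth func
 * changes:
 */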
enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

struct fd_resource {
   struct pipe_resource base;
   struct fd_bo *bo;  /* use fd_resource_set_bo() to write */
   enum pipe_format internal_format;
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   simple_mtx_t lock;

   /* bitmask of in-flight batches which reference this resource.  Note
    * that the batch does not hold a reference to the resource (instead
    * the fd_ringbuffer holds refs to the underlying fd_bo), so if the
    * resource is destroyed we need to clean up the batch's weak
    * references to us.
    */
   uint32_t batch_mask;

   /* reference to batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   enum fd_dirty_3d_state dirty;

   /* Uninitialized resources with UBWC format need their UBWC flag data
    * cleared before writes, as the flag data is read and used during
    * writes, so undefined flag data produces undefined results.
    */
   bool needs_ubwc_clear : 1;

   /*
    * LRZ
    *
    * TODO lrz width/height/pitch should probably also move to
    * fdl_layout
    */
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width;  // for lrz clear, does this differ from lrz_pitch?
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   struct fd_bo *lrz;
};

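/* pipe_memory_object wrapper, holding a reference to the underlying fd_bo: */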
struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

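/* Cast helpers between the gallium types and the freedreno wrappers: */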
static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

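/* Check whether any in-flight batch still needs this resource; a pending
 * GPU write always counts as busy, a pending GPU read only blocks a CPU
 * write:
 */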
static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}

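/* Check, without stalling, whether the GPU is still accessing the bo;
 * with PREP_NOSYNC, fd_bo_cpu_prep() returns an error rather than waiting:
 */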
static inline bool
fd_resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | DRM_FREEDRENO_PREP_NOSYNC) != 0;
}

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

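/* Record the state that would need to be dirtied if this resource were
 * rebound, see rebind_resource():
 */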
static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() per
    * resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}

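/* A sketch of a typical call-site, for illustration (hypothetical, not a
 * call-site from this tree), recording that a resource bound as a vertex
 * buffer would dirty vertex-buffer state if its bo changes:
 *
 *    fd_resource_set_usage(vb->buffer.resource, FD_DIRTY_VTXBUF);
 */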
static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc =
      util_format_description(format);
   return util_format_has_depth(desc);
}

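/* Transfer wrapper; staging_prsc/staging_box describe an optional staging
 * resource used when the bo cannot be mapped directly:
 */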
struct fd_transfer {
   struct pipe_transfer base;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

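/* get layout slice for specified mipmap level */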
static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->base.last_level);
   return &rsc->layout.slices[level];
}

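/* get stride (in bytes) between array layers / 3d slices for specified
 * mipmap level
 */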
static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->base.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

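/* get offset of UBWC flag data for specified mipmap level and layer */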
static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   debug_assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

/* This might be a5xx specific, but higher mipmap levels are always linear: */
static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
   struct fd_screen *screen = fd_screen(prsc->screen);
   debug_assert(!is_a3xx(screen));

   return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

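/* get hw tile mode for specified mipmap level */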
static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

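/* whether UBWC compression is enabled for specified mipmap level */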
static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx);

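/* Does the batch hold a (weak) reference to the resource, ie. is the
 * batch's bit set in rsc->batch_mask?
 */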
static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->batch_mask & (1 << batch->idx);
}

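/* Called before the batch writes rsc, to handle any required pre-write
 * clears (currently just the one-time UBWC flag-data clear):
 */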
static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc)
{
   if (unlikely(rsc->needs_ubwc_clear)) {
      batch->ctx->clear_ubwc(batch, rsc);
      rsc->needs_ubwc_clear = false;
   }
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
      struct fd_resource *rsc)
{
   /* Fast path: if the batch already references the resource, then we
    * know there is no other writer (since both _write and _read flush
    * other writers), and we've already recursed for stencil:
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}

#endif /* FREEDRENO_RESOURCE_H_ */