freedreno/a6xx: Don't take pipe_blit_info in emit_blit_dst
[mesa.git] / src/gallium/drivers/freedreno/freedreno_resource.h
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"
#include "util/simple_mtx.h"

#include "freedreno_batch.h"
#include "freedreno_util.h"
#include "freedreno/fdl/freedreno_layout.h"

enum fd_lrz_direction {
	FD_LRZ_UNKNOWN,
	/* Depth func less/less-than: */
	FD_LRZ_LESS,
	/* Depth func greater/greater-than: */
	FD_LRZ_GREATER,
};
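
/* A hedged sketch of how the direction is typically consumed (hypothetical
 * check; the actual logic lives in the per-generation backends): LRZ data
 * built for one compare direction cannot be reused for the other, so a
 * flip invalidates it:
 *
 *    if (rsc->lrz_direction != FD_LRZ_UNKNOWN &&
 *        rsc->lrz_direction != new_dir)
 *       rsc->lrz_valid = false;
 */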

struct fd_resource {
	struct pipe_resource base;
	struct fd_bo *bo;
	enum pipe_format internal_format;
	struct fdl_layout layout;

	/* buffer range that has been initialized */
	struct util_range valid_buffer_range;
	bool valid;
	struct renderonly_scanout *scanout;

	/* reference to the resource holding stencil data for a z32_s8 texture */
	/* TODO rename to secondary or auxiliary? */
	struct fd_resource *stencil;

	simple_mtx_t lock;

	/* bitmask of in-flight batches which reference this resource.  Note
	 * that the batch doesn't hold a reference to resources (but instead
	 * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
	 * the resource is destroyed we need to clean up the batch's weak
	 * references to us.  (See the sketch below the struct.)
	 */
	uint32_t batch_mask;

	/* reference to batch that writes this resource: */
	struct fd_batch *write_batch;

	/* Set of batches whose batch-cache key references this resource.
	 * We need to track this to know which batch-cache entries to
	 * invalidate if, for example, the resource is invalidated or
	 * shadowed.
	 */
	uint32_t bc_batch_mask;

	/* Sequence # incremented each time bo changes: */
	uint16_t seqno;

	/* bitmask of state this resource could potentially dirty when rebound,
	 * see rebind_resource()
	 */
	enum fd_dirty_3d_state dirty;

	/*
	 * LRZ
	 *
	 * TODO lrz width/height/pitch should probably also move to
	 * fdl_layout
	 */
	bool lrz_valid : 1;
	enum fd_lrz_direction lrz_direction : 2;
	uint16_t lrz_width;  // for lrz clear, does this differ from lrz_pitch?
	uint16_t lrz_height;
	uint16_t lrz_pitch;
	struct fd_bo *lrz;
};
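
/* A hedged sketch of how batch_mask is consumed: it is indexed by
 * batch->idx, so cleanup on resource destroy walks the set roughly like
 * this (hypothetical loop; the real bookkeeping lives in
 * freedreno_batch_cache.c):
 *
 *    uint32_t mask = rsc->batch_mask;
 *    while (mask) {
 *       int i = u_bit_scan(&mask);
 *       struct fd_batch *batch = screen->batch_cache.batches[i];
 *       _mesa_set_remove_key(batch->resources, rsc);
 *    }
 */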

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
	return (struct fd_resource *)ptex;
}

static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
	return (const struct fd_resource *)ptex;
}

static inline bool
pending(struct fd_resource *rsc, bool write)
{
	/* if we have a pending GPU write, we are busy in any case: */
	if (rsc->write_batch)
		return true;

	/* if CPU wants to write, but we are pending a GPU read, we are busy: */
	if (write && rsc->batch_mask)
		return true;

	if (rsc->stencil && pending(rsc->stencil, write))
		return true;

	return false;
}

static inline bool
fd_resource_busy(struct fd_resource *rsc, unsigned op)
{
	return fd_bo_cpu_prep(rsc->bo, NULL, op | DRM_FREEDRENO_PREP_NOSYNC) != 0;
}
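
/* Hedged usage sketch (hypothetical caller, loosely modeled on the
 * transfer-map path): pending() asks about batches still queued in this
 * context, while fd_resource_busy() asks the kernel about work that has
 * already been submitted:
 *
 *    bool write = !!(usage & PIPE_TRANSFER_WRITE);
 *    if (pending(rsc, write))
 *       flush_batches_referencing(ctx, rsc);   // hypothetical helper
 *    if (fd_resource_busy(rsc, write ? DRM_FREEDRENO_PREP_WRITE
 *                                    : DRM_FREEDRENO_PREP_READ))
 *       ... stall, or blit through a staging resource ...
 */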

static inline void
fd_resource_lock(struct fd_resource *rsc)
{
	simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
	simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
	if (!prsc)
		return;
	struct fd_resource *rsc = fd_resource(prsc);
	/* Bits are only ever ORed in, and we expect many set_usage() per
	 * resource, so do the quick check outside of the lock.
	 */
	if (likely(rsc->dirty & usage))
		return;
	fd_resource_lock(rsc);
	rsc->dirty |= usage;
	fd_resource_unlock(rsc);
}
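
/* For illustration, a hedged example of the expected call pattern: state
 * bind hooks tag each bound resource with the state it could dirty on
 * rebind, e.g. something along the lines of:
 *
 *    fd_resource_set_usage(view->texture, FD_DIRTY_TEX);
 */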

static inline bool
has_depth(enum pipe_format format)
{
	const struct util_format_description *desc =
			util_format_description(format);
	return util_format_has_depth(desc);
}

struct fd_transfer {
	struct pipe_transfer base;
	struct pipe_resource *staging_prsc;
	struct pipe_box staging_box;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
	return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
	assert(level <= rsc->base.last_level);
	return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
	return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
	if (is_a2xx(fd_screen(rsc->base.screen)))
		return fdl2_pitch(&rsc->layout, level);

	return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
	uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
	debug_assert(offset < fd_bo_size(rsc->bo));
	return offset;
}
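
/* A hedged sketch combining the helpers above: the CPU address of texel
 * (x, y) in a mapped, linear (non-UBWC) level would be computed roughly
 * like this:
 *
 *    uint8_t *base = fd_bo_map(rsc->bo);
 *    uint8_t *texel = base +
 *       fd_resource_offset(rsc, level, layer) +
 *       y * fd_resource_pitch(rsc, level) +
 *       x * rsc->layout.cpp;
 */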

static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
	uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
	debug_assert(offset < fd_bo_size(rsc->bo));
	return offset;
}

/* This might be a5xx specific, but higher mipmap levels are always linear: */
static inline bool
fd_resource_level_linear(const struct pipe_resource *prsc, int level)
{
	struct fd_screen *screen = fd_screen(prsc->screen);
	debug_assert(!is_a3xx(screen));

	return fdl_level_linear(&fd_resource_const(prsc)->layout, level);
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
	return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
	return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(struct pipe_resource *prsc)
{
	return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);

/* Slow-path counterpart to fd_batch_resource_read() below; declared here
 * so the inline wrapper compiles:
 */
void fd_batch_resource_read_slowpath(struct fd_batch *batch,
		struct fd_resource *rsc);

bool fd_render_condition_check(struct pipe_context *pctx);

static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
	return rsc->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
		struct fd_resource *rsc)
{
	/* Fast path: if we hit this then we know we don't have anyone else
	 * writing to it (since both _write and _read flush other writers), and
	 * that we've already recursed for stencil.
	 */
	if (unlikely(!fd_batch_references_resource(batch, rsc)))
		fd_batch_resource_read_slowpath(batch, rsc);
}
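
/* Hedged example of the intended call pattern (hypothetical caller; draw
 * setup marks every resource it will read from, e.g. each bound vertex
 * buffer):
 *
 *    for (unsigned i = 0; i < vtxbuf->count; i++)
 *       if (vtxbuf->vb[i].buffer.resource)
 *          fd_batch_resource_read(batch,
 *                fd_resource(vtxbuf->vb[i].buffer.resource));
 */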

#endif /* FREEDRENO_RESOURCE_H_ */