/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #ifndef FREEDRENO_RESOURCE_H_
28 #define FREEDRENO_RESOURCE_H_
30 #include "util/list.h"
31 #include "util/u_range.h"
32 #include "util/u_transfer_helper.h"
33 #include "util/simple_mtx.h"
35 #include "freedreno_batch.h"
36 #include "freedreno_util.h"
37 #include "freedreno/fdl/freedreno_layout.h"
/* Direction the depth test is known to pass in, used to decide when the
 * LRZ (low-resolution-Z) buffer must be invalidated on a direction change.
 */
enum fd_lrz_direction {
	FD_LRZ_UNKNOWN,
	/* Depth func less/less-than: */
	FD_LRZ_LESS,
	/* Depth func greater/greater-than: */
	FD_LRZ_GREATER,
};
48 struct pipe_resource base
;
50 enum pipe_format internal_format
;
51 struct fdl_layout layout
;
53 /* buffer range that has been initialized */
54 struct util_range valid_buffer_range
;
56 struct renderonly_scanout
*scanout
;
58 /* reference to the resource holding stencil data for a z32_s8 texture */
59 /* TODO rename to secondary or auxiliary? */
60 struct fd_resource
*stencil
;
64 /* bitmask of in-flight batches which reference this resource. Note
65 * that the batch doesn't hold reference to resources (but instead
66 * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
67 * the resource is destroyed we need to clean up the batch's weak
72 /* reference to batch that writes this resource: */
73 struct fd_batch
*write_batch
;
75 /* Set of batches whose batch-cache key references this resource.
76 * We need to track this to know which batch-cache entries to
77 * invalidate if, for example, the resource is invalidated or
80 uint32_t bc_batch_mask
;
82 /* Sequence # incremented each time bo changes: */
85 /* bitmask of state this resource could potentially dirty when rebound,
86 * see rebind_resource()
88 enum fd_dirty_3d_state dirty
;
93 * TODO lrz width/height/pitch should probably also move to
97 enum fd_lrz_direction lrz_direction
: 2;
98 uint16_t lrz_width
; // for lrz clear, does this differ from lrz_pitch?
104 struct fd_memory_object
{
105 struct pipe_memory_object b
;
/* Downcast from gallium pipe_resource to driver fd_resource. */
static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
	return (struct fd_resource *)ptex;
}
/* Const-preserving variant of fd_resource(). */
static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
	return (const struct fd_resource *)ptex;
}
/* Downcast from gallium pipe_memory_object to driver fd_memory_object. */
static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
	return (struct fd_memory_object *)pmemobj;
}
128 pending(struct fd_resource
*rsc
, bool write
)
130 /* if we have a pending GPU write, we are busy in any case: */
131 if (rsc
->write_batch
)
134 /* if CPU wants to write, but we are pending a GPU read, we are busy: */
135 if (write
&& rsc
->batch_mask
)
138 if (rsc
->stencil
&& pending(rsc
->stencil
, write
))
145 fd_resource_busy(struct fd_resource
*rsc
, unsigned op
)
147 return fd_bo_cpu_prep(rsc
->bo
, NULL
, op
| DRM_FREEDRENO_PREP_NOSYNC
) != 0;
151 fd_resource_lock(struct fd_resource
*rsc
)
153 simple_mtx_lock(&rsc
->lock
);
157 fd_resource_unlock(struct fd_resource
*rsc
)
159 simple_mtx_unlock(&rsc
->lock
);
163 fd_resource_set_usage(struct pipe_resource
*prsc
, enum fd_dirty_3d_state usage
)
167 struct fd_resource
*rsc
= fd_resource(prsc
);
168 /* Bits are only ever ORed in, and we expect many set_usage() per
169 * resource, so do the quick check outside of the lock.
171 if (likely(rsc
->dirty
& usage
))
173 fd_resource_lock(rsc
);
175 fd_resource_unlock(rsc
);
179 has_depth(enum pipe_format format
)
181 const struct util_format_description
*desc
=
182 util_format_description(format
);
183 return util_format_has_depth(desc
);
187 struct pipe_transfer base
;
188 struct pipe_resource
*staging_prsc
;
189 struct pipe_box staging_box
;
/* Downcast from gallium pipe_transfer to driver fd_transfer. */
static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
	return (struct fd_transfer *)ptrans;
}
198 static inline struct fdl_slice
*
199 fd_resource_slice(struct fd_resource
*rsc
, unsigned level
)
201 assert(level
<= rsc
->base
.last_level
);
202 return &rsc
->layout
.slices
[level
];
205 static inline uint32_t
206 fd_resource_layer_stride(struct fd_resource
*rsc
, unsigned level
)
208 return fdl_layer_stride(&rsc
->layout
, level
);
211 /* get pitch (in bytes) for specified mipmap level */
212 static inline uint32_t
213 fd_resource_pitch(struct fd_resource
*rsc
, unsigned level
)
215 if (is_a2xx(fd_screen(rsc
->base
.screen
)))
216 return fdl2_pitch(&rsc
->layout
, level
);
218 return fdl_pitch(&rsc
->layout
, level
);
221 /* get offset for specified mipmap level and texture/array layer */
222 static inline uint32_t
223 fd_resource_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
225 uint32_t offset
= fdl_surface_offset(&rsc
->layout
, level
, layer
);
226 debug_assert(offset
< fd_bo_size(rsc
->bo
));
230 static inline uint32_t
231 fd_resource_ubwc_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
233 uint32_t offset
= fdl_ubwc_offset(&rsc
->layout
, level
, layer
);
234 debug_assert(offset
< fd_bo_size(rsc
->bo
));
238 /* This might be a5xx specific, but higher mipmap levels are always linear: */
240 fd_resource_level_linear(const struct pipe_resource
*prsc
, int level
)
242 struct fd_screen
*screen
= fd_screen(prsc
->screen
);
243 debug_assert(!is_a3xx(screen
));
245 return fdl_level_linear(&fd_resource_const(prsc
)->layout
, level
);
248 static inline uint32_t
249 fd_resource_tile_mode(struct pipe_resource
*prsc
, int level
)
251 return fdl_tile_mode(&fd_resource(prsc
)->layout
, level
);
255 fd_resource_ubwc_enabled(struct fd_resource
*rsc
, int level
)
257 return fdl_ubwc_enabled(&rsc
->layout
, level
);
260 /* access # of samples, with 0 normalized to 1 (which is what we care about
263 static inline unsigned
264 fd_resource_nr_samples(struct pipe_resource
*prsc
)
266 return MAX2(1, prsc
->nr_samples
);
void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx);
280 fd_batch_references_resource(struct fd_batch
*batch
, struct fd_resource
*rsc
)
282 return rsc
->batch_mask
& (1 << batch
->idx
);
/* Track a read of rsc by batch, taking the slow path only on first use. */
static inline void
fd_batch_resource_read(struct fd_batch *batch,
		struct fd_resource *rsc)
{
	/* Fast path: if we hit this then we know we don't have anyone else
	 * writing to it (since both _write and _read flush other writers), and
	 * that we've already recursed for stencil.
	 */
	if (unlikely(!fd_batch_references_resource(batch, rsc)))
		fd_batch_resource_read_slowpath(batch, rsc);
}
297 #endif /* FREEDRENO_RESOURCE_H_ */