/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #ifndef FREEDRENO_RESOURCE_H_
28 #define FREEDRENO_RESOURCE_H_
30 #include "util/list.h"
31 #include "util/u_range.h"
32 #include "util/u_transfer_helper.h"
33 #include "util/simple_mtx.h"
35 #include "freedreno_batch.h"
36 #include "freedreno_util.h"
37 #include "freedreno/fdl/freedreno_layout.h"
40 struct pipe_resource base
;
42 enum pipe_format internal_format
;
43 struct fdl_layout layout
;
45 /* buffer range that has been initialized */
46 struct util_range valid_buffer_range
;
48 struct renderonly_scanout
*scanout
;
50 /* reference to the resource holding stencil data for a z32_s8 texture */
51 /* TODO rename to secondary or auxiliary? */
52 struct fd_resource
*stencil
;
56 /* bitmask of in-flight batches which reference this resource. Note
57 * that the batch doesn't hold reference to resources (but instead
58 * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
59 * the resource is destroyed we need to clean up the batch's weak
64 /* reference to batch that writes this resource: */
65 struct fd_batch
*write_batch
;
67 /* Set of batches whose batch-cache key references this resource.
68 * We need to track this to know which batch-cache entries to
69 * invalidate if, for example, the resource is invalidated or
72 uint32_t bc_batch_mask
;
74 /* Sequence # incremented each time bo changes: */
77 /* bitmask of state this resource could potentially dirty when rebound,
78 * see rebind_resource()
80 enum fd_dirty_3d_state dirty
;
85 * TODO lrz width/height/pitch should probably also move to
89 uint16_t lrz_width
; // for lrz clear, does this differ from lrz_pitch?
/* Downcast from gallium pipe_resource to the freedreno wrapper. */
static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}
/* const-preserving variant of fd_resource() */
static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}
108 pending(struct fd_resource
*rsc
, bool write
)
110 /* if we have a pending GPU write, we are busy in any case: */
111 if (rsc
->write_batch
)
114 /* if CPU wants to write, but we are pending a GPU read, we are busy: */
115 if (write
&& rsc
->batch_mask
)
118 if (rsc
->stencil
&& pending(rsc
->stencil
, write
))
125 fd_resource_busy(struct fd_resource
*rsc
, unsigned op
)
127 return fd_bo_cpu_prep(rsc
->bo
, NULL
, op
| DRM_FREEDRENO_PREP_NOSYNC
) != 0;
131 fd_resource_lock(struct fd_resource
*rsc
)
133 simple_mtx_lock(&rsc
->lock
);
137 fd_resource_unlock(struct fd_resource
*rsc
)
139 simple_mtx_unlock(&rsc
->lock
);
143 fd_resource_set_usage(struct pipe_resource
*prsc
, enum fd_dirty_3d_state usage
)
147 struct fd_resource
*rsc
= fd_resource(prsc
);
148 /* Bits are only ever ORed in, and we expect many set_usage() per
149 * resource, so do the quick check outside of the lock.
151 if (likely(rsc
->dirty
& usage
))
153 fd_resource_lock(rsc
);
155 fd_resource_unlock(rsc
);
159 has_depth(enum pipe_format format
)
161 const struct util_format_description
*desc
=
162 util_format_description(format
);
163 return util_format_has_depth(desc
);
167 struct pipe_transfer base
;
168 struct pipe_resource
*staging_prsc
;
169 struct pipe_box staging_box
;
/* Downcast from gallium pipe_transfer to the freedreno wrapper. */
static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}
178 static inline struct fdl_slice
*
179 fd_resource_slice(struct fd_resource
*rsc
, unsigned level
)
181 assert(level
<= rsc
->base
.last_level
);
182 return &rsc
->layout
.slices
[level
];
185 static inline uint32_t
186 fd_resource_layer_stride(struct fd_resource
*rsc
, unsigned level
)
188 return fdl_layer_stride(&rsc
->layout
, level
);
191 /* get offset for specified mipmap level and texture/array layer */
192 static inline uint32_t
193 fd_resource_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
195 uint32_t offset
= fdl_surface_offset(&rsc
->layout
, level
, layer
);
196 debug_assert(offset
< fd_bo_size(rsc
->bo
));
200 static inline uint32_t
201 fd_resource_ubwc_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
203 uint32_t offset
= fdl_ubwc_offset(&rsc
->layout
, level
, layer
);
204 debug_assert(offset
< fd_bo_size(rsc
->bo
));
208 /* This might be a5xx specific, but higher mipmap levels are always linear: */
210 fd_resource_level_linear(const struct pipe_resource
*prsc
, int level
)
212 struct fd_screen
*screen
= fd_screen(prsc
->screen
);
213 debug_assert(!is_a3xx(screen
));
215 return fdl_level_linear(&fd_resource_const(prsc
)->layout
, level
);
218 static inline uint32_t
219 fd_resource_tile_mode(struct pipe_resource
*prsc
, int level
)
221 return fdl_tile_mode(&fd_resource(prsc
)->layout
, level
);
225 fd_resource_ubwc_enabled(struct fd_resource
*rsc
, int level
)
227 return fdl_ubwc_enabled(&rsc
->layout
, level
);
230 /* access # of samples, with 0 normalized to 1 (which is what we care about
233 static inline unsigned
234 fd_resource_nr_samples(struct pipe_resource
*prsc
)
236 return MAX2(1, prsc
->nr_samples
);
void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);

bool fd_render_condition_check(struct pipe_context *pctx);
249 fd_batch_references_resource(struct fd_batch
*batch
, struct fd_resource
*rsc
)
251 return rsc
->batch_mask
& (1 << batch
->idx
);
/* Track a GPU read of the resource by the batch, taking the slow path only
 * on the first reference from this batch.
 */
static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc)
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}
266 #endif /* FREEDRENO_RESOURCE_H_ */