/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #ifndef FREEDRENO_RESOURCE_H_
28 #define FREEDRENO_RESOURCE_H_
30 #include "util/list.h"
31 #include "util/u_range.h"
32 #include "util/u_transfer_helper.h"
33 #include "util/simple_mtx.h"
35 #include "freedreno_batch.h"
36 #include "freedreno_util.h"
37 #include "freedreno/fdl/freedreno_layout.h"
40 struct pipe_resource base
;
42 enum pipe_format internal_format
;
43 struct fdl_layout layout
;
45 /* buffer range that has been initialized */
46 struct util_range valid_buffer_range
;
48 struct renderonly_scanout
*scanout
;
50 /* reference to the resource holding stencil data for a z32_s8 texture */
51 /* TODO rename to secondary or auxiliary? */
52 struct fd_resource
*stencil
;
56 /* bitmask of in-flight batches which reference this resource. Note
57 * that the batch doesn't hold reference to resources (but instead
58 * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
59 * the resource is destroyed we need to clean up the batch's weak
64 /* reference to batch that writes this resource: */
65 struct fd_batch
*write_batch
;
67 /* Set of batches whose batch-cache key references this resource.
68 * We need to track this to know which batch-cache entries to
69 * invalidate if, for example, the resource is invalidated or
72 uint32_t bc_batch_mask
;
74 /* Sequence # incremented each time bo changes: */
77 /* bitmask of state this resource could potentially dirty when rebound,
78 * see rebind_resource()
80 enum fd_dirty_3d_state dirty
;
85 * TODO lrz width/height/pitch should probably also move to
89 uint16_t lrz_width
; // for lrz clear, does this differ from lrz_pitch?
/**
 * Downcast a pipe_resource to the driver's fd_resource wrapper.
 */
static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}
/**
 * Const-preserving variant of fd_resource().
 */
static inline const struct fd_resource *
fd_resource_const(const struct pipe_resource *ptex)
{
   return (const struct fd_resource *)ptex;
}
108 pending(struct fd_resource
*rsc
, bool write
)
110 /* if we have a pending GPU write, we are busy in any case: */
111 if (rsc
->write_batch
)
114 /* if CPU wants to write, but we are pending a GPU read, we are busy: */
115 if (write
&& rsc
->batch_mask
)
118 if (rsc
->stencil
&& pending(rsc
->stencil
, write
))
125 fd_resource_busy(struct fd_resource
*rsc
, unsigned op
)
127 return fd_bo_cpu_prep(rsc
->bo
, NULL
, op
| DRM_FREEDRENO_PREP_NOSYNC
) != 0;
131 fd_resource_lock(struct fd_resource
*rsc
)
133 simple_mtx_lock(&rsc
->lock
);
137 fd_resource_unlock(struct fd_resource
*rsc
)
139 simple_mtx_unlock(&rsc
->lock
);
143 fd_resource_set_usage(struct pipe_resource
*prsc
, enum fd_dirty_3d_state usage
)
147 struct fd_resource
*rsc
= fd_resource(prsc
);
148 fd_resource_lock(rsc
);
150 fd_resource_unlock(rsc
);
154 has_depth(enum pipe_format format
)
156 const struct util_format_description
*desc
=
157 util_format_description(format
);
158 return util_format_has_depth(desc
);
162 struct pipe_transfer base
;
163 struct pipe_resource
*staging_prsc
;
164 struct pipe_box staging_box
;
/**
 * Downcast a pipe_transfer to the driver's fd_transfer wrapper.
 */
static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}
173 static inline struct fdl_slice
*
174 fd_resource_slice(struct fd_resource
*rsc
, unsigned level
)
176 assert(level
<= rsc
->base
.last_level
);
177 return &rsc
->layout
.slices
[level
];
180 static inline uint32_t
181 fd_resource_layer_stride(struct fd_resource
*rsc
, unsigned level
)
183 return fdl_layer_stride(&rsc
->layout
, level
);
186 /* get offset for specified mipmap level and texture/array layer */
187 static inline uint32_t
188 fd_resource_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
190 uint32_t offset
= fdl_surface_offset(&rsc
->layout
, level
, layer
);
191 debug_assert(offset
< fd_bo_size(rsc
->bo
));
195 static inline uint32_t
196 fd_resource_ubwc_offset(struct fd_resource
*rsc
, unsigned level
, unsigned layer
)
198 uint32_t offset
= fdl_ubwc_offset(&rsc
->layout
, level
, layer
);
199 debug_assert(offset
< fd_bo_size(rsc
->bo
));
203 /* This might be a5xx specific, but higher mipmap levels are always linear: */
205 fd_resource_level_linear(const struct pipe_resource
*prsc
, int level
)
207 struct fd_screen
*screen
= fd_screen(prsc
->screen
);
208 debug_assert(!is_a3xx(screen
));
210 return fdl_level_linear(&fd_resource_const(prsc
)->layout
, level
);
213 static inline uint32_t
214 fd_resource_tile_mode(struct pipe_resource
*prsc
, int level
)
216 return fdl_tile_mode(&fd_resource(prsc
)->layout
, level
);
220 fd_resource_ubwc_enabled(struct fd_resource
*rsc
, int level
)
222 return fdl_ubwc_enabled(&rsc
->layout
, level
);
225 /* access # of samples, with 0 normalized to 1 (which is what we care about
228 static inline unsigned
229 fd_resource_nr_samples(struct pipe_resource
*prsc
)
231 return MAX2(1, prsc
->nr_samples
);
/* Prototypes for the resource entry points implemented in
 * freedreno_resource.c:
 */
void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);

bool fd_render_condition_check(struct pipe_context *pctx);
244 fd_batch_references_resource(struct fd_batch
*batch
, struct fd_resource
*rsc
)
246 return rsc
->batch_mask
& (1 << batch
->idx
);
/**
 * Track a GPU read of rsc by batch, taking the slow path only when the
 * batch does not already reference the resource.
 */
static inline void
fd_batch_resource_read(struct fd_batch *batch, struct fd_resource *rsc)
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}
261 #endif /* FREEDRENO_RESOURCE_H_ */