etnaviv: Check that resource has a valid TS in etna_resource_needs_flush
mesa.git: src/gallium/drivers/etnaviv/etnaviv_transfer.c
/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include <drm_fourcc.h>

/* Compute offset into a 1D/2D/3D buffer of a certain box.
 * This box must be aligned to the block width and height of the
 * underlying format. */
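/* For example, for a linear PIPE_FORMAT_B8G8R8A8_UNORM resource (1x1 blocks,
 * 4 bytes per block) with a 1024-byte stride, a box at (x=8, y=2, z=0) maps
 * to offset 2 * 1024 + 8 * 4 = 2080 bytes. */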
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
          util_format_get_blocksize(format);
}

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer and free the old one using a fenced free.
    * The trickiest case to implement will be a tiled or supertiled surface
    * with a partial write to a target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain and must be
    * pushed back into the GPU domain before the RS executes the blit to the
    * base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* map buffer object */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];
         void *mapped = etna_bo_map(rsc->bo) + res_level->offset;

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary resource are only pulled into the CPU
    * domain if they are not mapped unsynchronized. If they were pulled in,
    * they must be pushed back into the GPU domain after CPU access is
    * finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }
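   /* With DISCARD_WHOLE_RESOURCE in effect, the read-back copy into the
    * temporary linear resource below can be skipped entirely, since the
    * previous contents may be left undefined. */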

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      /* Need to align the transfer region to satisfy RS restrictions, as we
       * really want to hit the RS blit path here.
       */
      unsigned w_align, h_align;

      if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
         w_align = h_align = 64;
      } else {
         w_align = ETNA_RS_WIDTH_MASK + 1;
         h_align = ETNA_RS_HEIGHT_MASK + 1;
      }
      h_align *= ctx->screen->specs.pixel_pipes;

      ptrans->box.width += ptrans->box.x & (w_align - 1);
      ptrans->box.x = ptrans->box.x & ~(w_align - 1);
      ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
      ptrans->box.height += ptrans->box.y & (h_align - 1);
      ptrans->box.y = ptrans->box.y & ~(h_align - 1);
      ptrans->box.height = align(ptrans->box.height,
                                 (ETNA_RS_HEIGHT_MASK + 1) *
                                  ctx->screen->specs.pixel_pipes);
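      /* For example, on a supertiled surface (w_align = 64) a box with
       * x = 70 and width = 10 becomes x = 64 and width = 16, with the width
       * then rounded up further to the RS width granularity. The same
       * expansion is applied vertically, with h_align scaled by the number
       * of pixel pipes. */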

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }
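   /* From this point on, rsc (and the res_level, stride and bo derived from
    * it below) may refer to the texture resource or the temporary linear
    * copy selected above rather than the resource that was passed in. */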

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not mapping in-place we need to
    * fire off the copy operation in transfer_flush_region (currently a no-op)
    * instead of unmap. Need to handle this to support the
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource, which can
    * be, at some point in the pipeline which is not yet executed:
    *
    * - bound as surface
    * - bound through vertex buffer
    * - bound through index buffer
    * - bound in sampler view
    * - used in clear_render_target / clear_depth_stencil operation
    * - used in blit
    * - used in resource_copy_region
    *
    * How do other drivers record this information over the course of the
    * rendering pipeline? Is it necessary at all? It only matters if we want
    * to provide a fast path that maps the resource directly (and for
    * PIPE_TRANSFER_MAP_DIRECTLY) without forcing a sync. We also need to know
    * whether the resource is in use to determine if a sync is needed (or we
    * could just always sync, at the expense of performance).
    *
    * A conservative approximation without too much overhead would be to mark
    * all resources that have been bound at some point as busy. A drawback
    * would be that accessing resources that have been bound but are no longer
    * in use for a while would still carry a performance penalty. On the other
    * hand, the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
    * PIPE_TRANSFER_UNSYNCHRONIZED to avoid this in the first place...
    *
    * A) We use an in-pipe copy engine and queue the copy operation after
    *    unmap so that the copy will be performed when all current commands
    *    have been executed. Using the RS is possible, though not necessarily
    *    always efficient. This can also do any kind of tiling for us. Only
    *    possible when PIPE_TRANSFER_DISCARD_RANGE is set.
    * B) We discard the entire resource (or at least the mipmap level) and
    *    allocate new memory for it. Only possible when mapping the entire
    *    resource or when PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have a temporary resource with an outstanding copy
       * into it. Otherwise infer the flush requirement from the resource
       * access and current GPU usage (reads must wait for GPU writes, writes
       * must have exclusive access to the buffer).
       */
      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
            ((usage & PIPE_TRANSFER_WRITE) && rsc->status))))
         pctx->flush(pctx, NULL, 0);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }
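   /* If etna_bo_cpu_prep() was called above, the BO is now in the CPU domain;
    * the matching etna_bo_cpu_fini() is issued from etna_transfer_unmap(). */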

   /* map buffer object */
   void *mapped = etna_bo_map(rsc->bo);
   if (!mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      return mapped + res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled resources, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;
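      /* For instance, a 100x50x1 box of PIPE_FORMAT_B8G8R8A8_UNORM (1x1
       * blocks, 4 bytes each) gives a stride of 400 bytes, a layer stride of
       * 20000 bytes and a 20000-byte staging allocation. */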

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now */
}

void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}