etnaviv: Rework locking
src/gallium/drivers/etnaviv/etnaviv_transfer.c
/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_etc2.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include "drm-uapi/drm_fourcc.h"

/* Compute offset into a 1D/2D/3D buffer of a certain box.
 * This box must be aligned to the block width and height of the
 * underlying format. */
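/* Worked example (illustrative values): for PIPE_FORMAT_B8G8R8A8_UNORM
 * (1x1 pixel blocks, 4 bytes each), a box at (x=8, y=4, z=0) with a stride
 * of 256 bytes yields 0 * layer_stride + 4 * 256 + 8 * 4 = 1056 bytes into
 * the level. */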
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
          util_format_get_blocksize(format);
}

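/* Patch ETC2 data in place for GPU consumption (see etnaviv_etc2.c). The
 * offsets of the blocks that need patching are computed once per level and
 * cached; level->patched tracks whether the data currently holds the patched
 * (GPU) or unpatched (CPU) representation. */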
static void etna_patch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (likely(!etna_etc2_needs_patching(prsc)))
      return;

   if (level->patched)
      return;

   /* Do we already have the offsets of the blocks to patch? */
   if (!level->patch_offsets) {
      level->patch_offsets = CALLOC_STRUCT(util_dynarray);

      etna_etc2_calculate_blocks(buffer, ptrans->stride,
                                 ptrans->box.width, ptrans->box.height,
                                 prsc->format, level->patch_offsets);
   }

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = true;
}

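/* Undo the ETC2 patching before handing the data back to the CPU: the cached
 * patch offsets are applied again, restoring the unpatched representation. */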
static void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (!level->patched)
      return;

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = false;
}

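/* Unmap a transfer: for write transfers, re-tile or copy the staging data
 * back into the resource (or blit back the temporary linear resource),
 * re-apply the ETC2 patching for the GPU, release CPU access to the BO and
 * free the transfer. */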
static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX When writing to a resource that is already in use, replace the
    * resource with a completely new buffer and free the old one using a
    * fenced free. The most tricky case to implement will be: tiled or
    * supertiled surface, partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, so we must
    * push them back into the GPU domain before the RS executes the blit to
    * the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* write back the staging data to the mapped resource */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_tile(
                  trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                  trans->staging + z * ptrans->layer_stride,
                  ptrans->box.x, ptrans->box.y,
                  res_level->stride, ptrans->box.width, ptrans->box.height,
                  ptrans->stride, util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /* We need to have the patched data ready for the GPU. */
   etna_patch_data(trans->mapped, ptrans);

   /*
    * Transfers without a temporary are only pulled into the CPU domain if
    * they are not mapped unsynchronized. If they are, we must push them back
    * into the GPU domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

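/* Map a box of a resource for CPU access. Depending on layout and state this
 * either maps the BO directly (linear), blits to a temporary linear resource
 * first (tile status or RS-incompatible tiling), or de-tiles into a malloc'd
 * staging buffer. Returns a pointer for CPU use, or NULL on failure. */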
static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1 && rsc->ts_bo) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = 64;
            h_align = 64 * ctx->screen->specs.pixel_pipes;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }

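         /* Grow the box so its origin sits on the alignment boundary while
          * still covering the original pixels: e.g. a box starting at x = 3
          * is moved back to x = 0 and widened by 3, then the width is padded
          * up to the RS width granularity. */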
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height, ETNA_RS_HEIGHT_MASK + 1);
      }

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not mapping in-place we need to
    * fire off the copy operation in transfer_flush_region (currently a no-op)
    * instead of unmap. Need to handle this to support the
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource, which can
    * be, at some point in the pipeline which is not yet executed:
    *
    * - bound as surface
    * - bound through vertex buffer
    * - bound through index buffer
    * - bound in sampler view
    * - used in clear_render_target / clear_depth_stencil operation
    * - used in blit
    * - used in resource_copy_region
    *
    * How do other drivers record this information over the course of the
    * rendering pipeline? Is it necessary at all? Only in case we want to
    * provide a fast path and map the resource directly (and for
    * PIPE_TRANSFER_MAP_DIRECTLY), and we don't want to force a sync. We also
    * need to know whether the resource is in use to determine if a sync is
    * needed (or just do it always, but that comes at the expense of
    * performance).
    *
    * A conservative approximation without too much overhead would be to mark
    * all resources that have been bound at some point as busy. A drawback
    * would be that accessing resources that have been bound but are no
    * longer in use for a while still carries a performance penalty. On the
    * other hand, the program could be using
    * PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or PIPE_TRANSFER_UNSYNCHRONIZED to
    * avoid this in the first place...
    *
    * A) We use an in-pipe copy engine, and queue the copy operation after
    *    unmap so that the copy will be performed when all current commands
    *    have been executed. Using the RS is possible, not sure if always
    *    efficient. This can also do any kind of tiling for us. Only possible
    *    when PIPE_TRANSFER_DISCARD_RANGE is set.
    * B) We discard the entire resource (or at least, the mipmap level) and
    *    allocate new memory for it. Only possible when mapping the entire
    *    resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      struct etna_screen *screen = ctx->screen;
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have the temporary resource and have a copy to
       * this outstanding. Otherwise infer the flush requirement from the
       * resource access and current GPU usage (reads must wait for GPU
       * writes, writes must have exclusive access to the buffer).
       */
      mtx_lock(&ctx->lock);

      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
            ((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
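         /* Flush every context that still has unflushed work pending against
          * this resource, so all GPU writes are submitted to the kernel
          * before we wait in etna_bo_cpu_prep() below. */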
         set_foreach(rsc->pending_ctx, entry) {
            struct etna_context *pend_ctx = (struct etna_context *)entry->key;
            struct pipe_context *pend_pctx = &pend_ctx->base;

            pend_pctx->flush(pend_pctx, NULL, 0);
         }
      }

      mtx_unlock(&ctx->lock);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      /*
       * The ETC2 patching operates in-place on the resource, so the resource
       * will get written to even on read-only transfers. This blocks the GPU
       * from sampling this resource while the CPU access is in progress.
       */
      if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
                       etna_compute_offset(prsc->format, box, res_level->stride,
                                           res_level->layer_stride);

      /* We need to have the unpatched data ready for the gfx stack. */
      if (usage & PIPE_TRANSFER_READ)
         etna_unpatch_data(trans->mapped, ptrans);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

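      /* Lay the staging buffer out tightly packed for the mapped box; the
       * CPU always sees plain linear data regardless of the resource's
       * tiling layout. */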
      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_untile(trans->staging + z * ptrans->layer_stride,
                                   trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                                   ptrans->box.x, ptrans->box.y, res_level->stride,
                                   ptrans->box.width, ptrans->box.height, ptrans->stride,
                                   util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now */
}

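/* Hook the transfer functions into the context. A state tracker then drives
 * them through the standard gallium entry points; a minimal sketch of the
 * expected call sequence (illustrative only, error handling omitted):
 *
 *    struct pipe_transfer *t;
 *    void *map = pctx->transfer_map(pctx, prsc, 0, PIPE_TRANSFER_WRITE,
 *                                   &box, &t);
 *    memcpy(map, data, size);
 *    pctx->transfer_unmap(pctx, t);
 */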
void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}