/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_etc2.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include "drm-uapi/drm_fourcc.h"

/* Compute offset into a 1D/2D/3D buffer of a certain box.
 * This box must be aligned to the block width and height of the
 * underlying format. */
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
             util_format_get_blocksize(format);
}
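
/* Worked example (illustrative values, not taken from the driver): for an
 * ETC2 level with 4x4 pixel blocks of 8 bytes each, stride = 512 and
 * layer_stride = 8192, a block-aligned box at (x=8, y=4, z=1) maps to
 *    1 * 8192 + (4 / 4) * 512 + (8 / 4) * 8 = 8720 bytes
 * into the level data. */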

static void etna_patch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (likely(!etna_etc2_needs_patching(prsc)))
      return;

   if (level->patched)
      return;

   /* do we already have the offsets of the blocks to patch? */
   if (!level->patch_offsets) {
      level->patch_offsets = CALLOC_STRUCT(util_dynarray);

      etna_etc2_calculate_blocks(buffer, ptrans->stride,
                                 ptrans->box.width, ptrans->box.height,
                                 prsc->format, level->patch_offsets);
   }

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = true;
}

static void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (!level->patched)
      return;

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = false;
}
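
/* Editorial note (inferred from the pair of functions above): both paths call
 * etna_etc2_patch() with the same offset list, so the patch appears to be an
 * in-place toggle of the marked blocks; level->patched tracks which view the
 * buffer currently holds (GPU-patched vs. standard ETC2). */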

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);
   /* XXX
    * When writing to a resource that is already in use, replace it with a
    * completely new buffer and free the old one using a fenced free.
    * The trickiest case to implement will be a tiled or supertiled surface
    * with a partial write whose target is not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, so we must
    * push them back into the GPU domain before the RS executes the blit to
    * the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* write the staging buffer contents back to the resource */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_tile(
                  trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                  trans->staging + z * ptrans->layer_stride,
                  ptrans->box.x, ptrans->box.y,
                  res_level->stride, ptrans->box.width, ptrans->box.height,
                  ptrans->stride, util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /* We need to have the patched data ready for the GPU. */
   etna_patch_data(trans->mapped, ptrans);

   /*
    * Transfers without a temporary resource are only pulled into the CPU
    * domain if they are not mapped unsynchronized. If they are, we must push
    * them back into the GPU domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   if ((ptrans->resource->target == PIPE_BUFFER) &&
       (ptrans->usage & PIPE_TRANSFER_WRITE)) {
      util_range_add(&rsc->base,
                     &rsc->valid_buffer_range,
                     ptrans->box.x,
                     ptrans->box.x + ptrans->box.width);
   }

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   /*
    * Upgrade to UNSYNCHRONIZED if the target is a PIPE_BUFFER and the mapped
    * range is still uninitialized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       (prsc->target == PIPE_BUFFER) &&
       !util_ranges_intersect(&rsc->valid_buffer_range,
                              box->x,
                              box->x + box->width)) {
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
   }

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in software. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1 && rsc->ts_bo) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as
          * we really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = 64;
            h_align = 64 * ctx->screen->specs.pixel_pipes;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }

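         /* Illustrative numbers (editorial example, not from the source):
          * with w_align = 16, a request of x = 13, width = 10 first grows to
          * width = 10 + (13 & 15) = 23 with x rounded down to 0, and the
          * width is then padded to the RS granularity: align(23, 16) = 32. */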
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height, (ETNA_RS_HEIGHT_MASK + 1));
      }

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not mapping in-place we need to
    * fire off the copy operation in transfer_flush_region (currently a no-op)
    * instead of unmap. Need to handle this to support the
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource, which can
    * be pending at some point in the pipeline that has not been executed yet:
    *
    * - bound as surface
    * - bound through vertex buffer
    * - bound through index buffer
    * - bound in sampler view
    * - used in clear_render_target / clear_depth_stencil operation
    * - used in blit
    * - used in resource_copy_region
    *
    * How do other drivers record this information over the course of the
    * rendering pipeline? Is it necessary at all? Only in case we want to
    * provide a fast path and map the resource directly (and for
    * PIPE_TRANSFER_MAP_DIRECTLY), and we don't want to force a sync. We also
    * need to know whether the resource is in use, to determine if a sync is
    * needed (or just do it always, but that comes at the expense of
    * performance).
    *
    * A conservative approximation without too much overhead would be to mark
    * all resources that have been bound at some point as busy. A drawback is
    * that accessing resources that have been bound but are no longer in use
    * for a while would still carry a performance penalty. On the other hand,
    * the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
    * PIPE_TRANSFER_UNSYNCHRONIZED to avoid this in the first place...
    *
    * A) We use an in-pipe copy engine, and queue the copy operation after
    *    unmap so that the copy will be performed when all current commands
    *    have been executed. Using the RS is possible, though not sure if it
    *    is always efficient. This can also do any kind of tiling for us.
    *    Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
    * B) We discard the entire resource (or at least, the mipmap level) and
    *    allocate new memory for it. Only possible when mapping the entire
    *    resource or when PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have a temporary resource with an outstanding copy
       * to it. Otherwise infer the flush requirement from the resource access
       * and current GPU usage (reads must wait for GPU writes, writes must
       * have exclusive access to the buffer).
       */
      mtx_lock(&ctx->lock);

      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
            ((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
         set_foreach(rsc->pending_ctx, entry) {
            struct etna_context *pend_ctx = (struct etna_context *)entry->key;
            struct pipe_context *pend_pctx = &pend_ctx->base;

            pend_pctx->flush(pend_pctx, NULL, 0);
         }
      }

      mtx_unlock(&ctx->lock);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      /*
       * The ETC2 patching operates in-place on the resource, so the resource
       * gets written to even on read-only transfers. This blocks the GPU from
       * sampling this resource.
       */
      if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
                       etna_compute_offset(prsc->format, box, res_level->stride,
                                           res_level->layer_stride);

      /* We need to have the unpatched data ready for the gfx stack. */
      if (usage & PIPE_TRANSFER_READ)
         etna_unpatch_data(trans->mapped, ptrans);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled resources, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;
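
      /* Illustrative sizing (editorial example, values not from the source):
       * a 100x60x1 box of RGBA8 (1x1 blocks, 4 bytes) gives
       *    stride       = align(100, 1) * 4  = 400 bytes
       *    layer_stride = align(60, 1) * 400 = 24000 bytes
       * so the staging allocation below is 24000 bytes. */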

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_untile(trans->staging + z * ptrans->layer_stride,
                                   trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                                   ptrans->box.x, ptrans->box.y, res_level->stride,
                                   ptrans->box.width, ptrans->box.height, ptrans->stride,
                                   util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *ptrans,
                           const struct pipe_box *box)
{
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   if (ptrans->resource->target == PIPE_BUFFER)
      util_range_add(&rsc->base,
                     &rsc->valid_buffer_range,
                     ptrans->box.x + box->x,
                     ptrans->box.x + box->x + box->width);
}

void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}
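
/* Editorial sketch of how a state tracker typically drives the hooks
 * installed above (illustrative only; `prsc`, `box`, `data` and `size` are
 * assumed to exist in the caller):
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pctx->transfer_map(pctx, prsc, 0, PIPE_TRANSFER_WRITE,
 *                                   &box, &xfer);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pctx->transfer_unmap(pctx, xfer);
 *    }
 */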