etnaviv: reduce rs alignment requirement for two pixel pipes GPU
[mesa.git] src/gallium/drivers/etnaviv/etnaviv_transfer.c
/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_etc2.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include "drm-uapi/drm_fourcc.h"

/* Compute offset into a 1D/2D/3D buffer of a certain box.
 * This box must be aligned to the block width and height of the
 * underlying format. */
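/* Example with hypothetical numbers: for an ETC2 format (4x4 blocks,
 * 8 bytes per block), a box at (x=8, y=4, z=0) yields an offset of
 * 1 * stride + 2 * 8 bytes into the level. */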
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
          util_format_get_blocksize(format);
}

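/* Patch the mapped ETC2 data in place so that the GPU samples it correctly,
 * computing and caching the offsets of the affected blocks on first use.
 * Applying the same patch again restores the original data, which is what
 * etna_unpatch_data() below relies on. */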
static void etna_patch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (likely(!etna_etc2_needs_patching(prsc)))
      return;

   if (level->patched)
      return;

   /* do we already have the offsets of the blocks to patch? */
   if (!level->patch_offsets) {
      level->patch_offsets = CALLOC_STRUCT(util_dynarray);

      etna_etc2_calculate_blocks(buffer, ptrans->stride,
                                 ptrans->box.width, ptrans->box.height,
                                 prsc->format, level->patch_offsets);
   }

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = true;
}

static void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (!level->patched)
      return;

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = false;
}

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, so we must
    * push them back into the GPU domain before the RS executes the blit to
    * the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* write back the staging buffer contents */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               trans->mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

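      /* Bump the resource sequence number so that etna_resource_newer()
       * recognizes this copy of the resource as the most recent one. */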
      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /* We need to have the patched data ready for the GPU. */
   etna_patch_data(trans->mapped, ptrans);

   /*
    * Transfers without a temporary resource are only pulled into the CPU
    * domain if they are not mapped unsynchronized. If they are, we must push
    * them back into the GPU domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

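   /* Pick a mapping strategy: prefer the resource's texture twin when it is
    * at least as recent, fall back to a linear temporary filled by the RS
    * when tile status or an RS-incompatible layout is in the way, and
    * otherwise map the buffer object directly. */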
   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       ETNA_ADDRESSING_MODE_TILED, DRM_FORMAT_MOD_LINEAR,
                                       &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = 64;
            h_align = 64 * ctx->screen->specs.pixel_pipes;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }

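         /* Grow the box by the origin misalignment, snap the origin down to
          * the alignment, then round the resulting size up to the RS
          * granularity. */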
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height, ETNA_RS_HEIGHT_MASK + 1);
      }

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not in place we need to fire off
    * the copy operation in transfer_flush_region (currently a no-op) instead
    * of unmap. Need to handle this to support the ARB_map_buffer_range
    * extension at least.
    */
   /* XXX we don't take care of current operations on the resource, which can
      be, at some point in the pipeline that is not yet executed:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over the course of the
      rendering pipeline? Is it necessary at all? Only in case we want to
      provide a fast path and map the resource directly (and for
      PIPE_TRANSFER_MAP_DIRECTLY), and we don't want to force a sync. We also
      need to know whether the resource is in use to determine if a sync is
      needed (or just do it always, but that comes at the expense of
      performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have been bound at some point as busy. A drawback
      would be that accessing resources that have been bound but are no longer
      in use for a while would still carry a performance penalty. On the other
      hand, the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
      PIPE_TRANSFER_UNSYNCHRONIZED to avoid this in the first place...

      A) We use an in-pipe copy engine, and queue the copy operation after
         unmap so that the copy will be performed when all current commands
         have been executed. Using the RS is possible, not sure if always
         efficient. This can also do any kind of tiling for us. Only possible
         when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least, the mipmap level) and
         allocate new memory for it. Only possible when mapping the entire
         resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      struct etna_screen *screen = ctx->screen;
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have the temporary resource and have a copy to this
       * outstanding. Otherwise infer flush requirement from resource access and
       * current GPU usage (reads must wait for GPU writes, writes must have
       * exclusive access to the buffer).
       */
      mtx_lock(&screen->lock);

      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
            ((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
         set_foreach(rsc->pending_ctx, entry) {
            struct etna_context *pend_ctx = (struct etna_context *)entry->key;
            struct pipe_context *pend_pctx = &pend_ctx->base;

            pend_pctx->flush(pend_pctx, NULL, 0);
         }
      }

      mtx_unlock(&screen->lock);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      /*
       * The ETC2 patching operates in-place on the resource, so the resource
       * gets written even on read-only transfers. This blocks the GPU from
       * sampling the resource while it is mapped.
       */
      if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
                       etna_compute_offset(prsc->format, box, res_level->stride,
                                           res_level->layer_stride);

      /* We need to have the unpatched data ready for the gfx stack. */
      if (usage & PIPE_TRANSFER_READ)
         etna_unpatch_data(trans->mapped, ptrans);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

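      /* Set up a CPU-side staging buffer for the application to use; strides
       * are in bytes and cover whole blocks for compressed formats. */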
      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                trans->mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now; see the XXX in etna_transfer_map() about
    * PIPE_TRANSFER_FLUSH_EXPLICIT: the write-back copy would need to happen
    * here instead of in unmap. */
}

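/* Install the transfer entry points. buffer_subdata/texture_subdata use the
 * generic u_transfer fallbacks, which route through transfer_map/unmap
 * above. */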
void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}