src/gallium/drivers/etnaviv/etnaviv_transfer.c
/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include "drm-uapi/drm_fourcc.h"

/* Compute offset into a 1D/2D/3D buffer of a certain box.
 * This box must be aligned to the block width and height of the
 * underlying format. */
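/* For example, with a 4x4 block-compressed format using 8-byte blocks, a box
 * origin of (x=8, y=12, z=0) maps to 3 * stride + 2 * 8 bytes into the level. */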
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
             util_format_get_blocksize(format);
}

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain; they must be
    * pushed back into the GPU domain before the RS executes the blit to the
    * base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* write back the staging buffer contents */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               trans->mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

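      /* Bump the sequence number so etna_resource_newer() treats this copy
       * as the most up-to-date one. */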
      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary are only pulled into the CPU domain if
    * they are not mapped unsynchronized. If they were pulled in, they must be
    * pushed back into the GPU domain once CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

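      /* Allocate a linear, renderable temporary of the same size; the RS (or
       * BLT) copies between it and the real resource, and the CPU maps the
       * temporary directly. */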
      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       ETNA_ADDRESSING_MODE_TILED, DRM_FORMAT_MOD_LINEAR,
                                       &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = h_align = 64;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }
         h_align *= ctx->screen->specs.pixel_pipes;

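         /* Expand the box outward to the RS alignment: move x/y down to the
          * previous aligned boundary and grow width/height to compensate. */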
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height,
                                    (ETNA_RS_HEIGHT_MASK + 1) *
                                     ctx->screen->specs.pixel_pipes);
      }

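      /* Seed the temporary with the current contents, unless the caller is
       * discarding the whole resource anyway. */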
      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not mapping in-place we need to
    * fire off the copy operation in transfer_flush_region (currently a no-op)
    * instead of unmap. Need to handle this to support the
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource; which can
      be, at some point in the pipeline which is not yet executed:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over the course of the
      rendering pipeline? Is it necessary at all? Only in case we want to
      provide a fast path and map the resource directly (and for
      PIPE_TRANSFER_MAP_DIRECTLY), and we don't want to force a sync.
      We also need to know whether the resource is in use to determine if a
      sync is needed (or just do it always, but that comes at the expense of
      performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have been bound at some point as busy. A drawback
      would be that accessing resources that have been bound but are no longer
      in use for a while still carries a performance penalty. On the other
      hand, the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
      PIPE_TRANSFER_UNSYNCHRONIZED to avoid this in the first place...

      A) We use an in-pipe copy engine, and queue the copy operation after
         unmap so that the copy will be performed when all current commands
         have been executed. Using the RS is possible, not sure if always
         efficient. This can also do any kind of tiling for us. Only possible
         when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least, the mipmap level) and
         allocate new memory for it. Only possible when mapping the entire
         resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have the temporary resource and have a copy to this
       * outstanding. Otherwise infer flush requirement from resource access and
       * current GPU usage (reads must wait for GPU writes, writes must have
       * exclusive access to the buffer).
       */
      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
            ((usage & PIPE_TRANSFER_WRITE) && rsc->status))))
         pctx->flush(pctx, NULL, 0);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

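   /* Linear layouts can be mapped in place: hand back a pointer into the BO
    * at the requested level and box. Other layouts go through a malloc'ed
    * staging buffer that is tiled/untiled in software. */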
   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
                       etna_compute_offset(prsc->format, box, res_level->stride,
                                           res_level->layer_stride);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled resources, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

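      /* The staging buffer is a tightly packed linear copy of just the
       * requested box; reads untile into it below and writes are tiled back
       * in etna_transfer_unmap(). */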
      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                trans->mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now */
}

void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}