virgl: fix readback with pending transfers
[mesa.git] src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

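/* Prepare a transfer for mapping: decide whether the current cmdbuf must be
 * flushed, whether the host storage must be read back into the guest
 * storage, and whether we must wait for the host resource to become idle,
 * then perform those operations. Returns how the transfer should be mapped,
 * or VIRGL_TRANSFER_MAP_ERROR when mapping cannot succeed right now.
 */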
enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width)) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   /* XXX This is incorrect and will be removed. Consider
    *
    *   glTexImage2D(..., data1);
    *   glDrawArrays();
    *   glFlush();
    *   glTexImage2D(..., data2);
    *
    * readback and flush are both false in the second glTexImage2D call. The
    * draw call might end up seeing data2. Same applies to buffers with
    * glBufferSubData.
    */
   wait = flush || readback;

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait) {
      /* fail the mapping after flush and readback so that it will succeed in
       * the future
       */
      if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
          vws->resource_is_busy(vws, res->hw_res))
         return VIRGL_TRANSFER_MAP_ERROR;

      vws->resource_wait(vws, res->hw_res);
   }

   return map_type;
}

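/* Allocate a virgl_resource, compute its guest storage layout, and create
 * the backing host resource through the winsys.
 */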
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

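/* Import a texture from a winsys handle. Buffer imports are not supported
 * and return NULL.
 */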
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

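/* Hook the resource entry points into the pipe_screen vtable. */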
void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

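/* Fast path for buffer_subdata: returns true if the write could be folded
 * into an already-queued transfer, in which case no further mapping is
 * needed.
 */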
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}

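/* buffer_subdata hook: tries the transfer-extension fast path for writes
 * that discard their range, and otherwise falls back to a regular
 * map/copy/unmap.
 */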
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

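/* Hook the transfer and subdata entry points into the pipe_context vtable. */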
void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

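/* Compute the stride, layer stride, and offset of each mip level of the
 * guest storage, along with the total size. MSAA resources get no guest
 * backing store (total_size is 0).
 */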
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

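/* Allocate a virgl_transfer from the pool and initialize it. The transfer
 * offset is the byte offset of the box within the guest storage, computed
 * from the level offset, the layer (or slice) stride, and the block
 * coordinates of the box origin.
 */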
struct virgl_transfer *
virgl_resource_create_transfer(struct slab_child_pool *pool,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

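/* Free a transfer allocated with virgl_resource_create_transfer(). */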
void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}

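/* Release the host resource and free the guest-side bookkeeping. */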
void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}

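/* Export the resource as a winsys handle. Buffers cannot be exported. */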
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

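/* Mark a level as no longer clean: the next transfer of that level will
 * require a readback from the host storage. Buffers track only a single
 * clean bit.
 */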
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}