virgl: Store the virgl_hw_res for copy transfers
src/gallium/drivers/virgl/virgl_resource.c (mesa.git)
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_TRANSFER_DISCARD_RANGE |
                            PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->u.b);
      } else {
         can_staging = vctx->transfer_uploader &&
                       !vctx->transfer_uploader_in_use;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
                     VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}

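/* Create a resource: compute the guest-side layout for the template and ask
 * the winsys to allocate a host-visible hardware resource of the required
 * total size.
 */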
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   virgl_resource_layout(&res->u.b, &res->metadata);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

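/* Import a texture from an external winsys handle. Buffers cannot be
 * imported this way and return NULL.
 */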
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

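/* Plug the virgl resource callbacks into the pipe_screen vtable. */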
void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}

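/* Upload a CPU-side data range into a buffer. When only a sub-range is being
 * written, first try to extend a previously queued transfer; otherwise fall
 * back to a regular map/memcpy/unmap.
 */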
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

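/* Plug the virgl transfer/subdata callbacks into the pipe_context vtable. */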
void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

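/* Compute the guest storage layout (per-level strides and offsets) and the
 * total size of the guest backing store for a resource template.
 */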
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

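/* Allocate a virgl_transfer from the context's slab pool and initialize it:
 * compute the byte offset of the transfer box within the guest layout and
 * take references on the pipe_resource and its backing hw_res.
 */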
struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_hw_res = NULL;
   trans->copy_src_offset = 0;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

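/* Release all references held by a transfer and return it to the slab pool. */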
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

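/* Destroy a resource, dropping the reference on its hardware resource. */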
void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

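/* Export a winsys handle for a texture resource; buffers are not exportable. */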
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

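/* Mark a level as dirty: the guest storage no longer matches the host
 * storage, so a readback is required before the level can be mapped.
 */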
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                                        unsigned *out_stride,
                                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from the transfer uploader to service the transfer. */
void *virgl_transfer_uploader_map(struct virgl_context *vctx,
                                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   struct pipe_resource *copy_src_res = NULL;
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;

   assert(vctx->transfer_uploader);
   assert(!vctx->transfer_uploader_in_use);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|      ==> size
    *         |---|               ==> align_offset
    *         |------------|      ==> allocation of size + align_offset
    */
   align_offset = vres->u.b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   u_upload_alloc(vctx->transfer_uploader, 0, size + align_offset,
                  VIRGL_MAP_BUFFER_ALIGNMENT,
                  &vtransfer->copy_src_offset,
                  &copy_src_res, &map_addr);
   if (map_addr) {
      struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

      /* Extract and reference the hw_res backing the pipe_resource. */
      vws->resource_reference(vws, &vtransfer->copy_src_hw_res,
                              virgl_resource(copy_src_res)->hw_res);
      pipe_resource_reference(&copy_src_res, NULL);

      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* The pointer returned by u_upload_alloc already has +offset
       * applied. */
      vctx->transfer_uploader_in_use = true;

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

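/* Replace the resource's backing hardware resource with a freshly allocated
 * one, so that a whole-resource-discard map does not have to wait for the old
 * storage to become idle, and rebind the new hw_res to the context.
 */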
bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->u.b;
   unsigned vbind;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->u.b);

   return true;
}