virgl: Support VIRGL_BIND_SHARED
mesa.git: src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant.  PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

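/* Decide how a transfer_map of the given transfer should be serviced, and
 * carry out any flush, readback, or wait that the chosen path requires.
 * Returns VIRGL_TRANSFER_MAP_HW_RES when the existing hw resource can be
 * mapped directly, VIRGL_TRANSFER_MAP_REALLOC or VIRGL_TRANSFER_MAP_STAGING
 * when a discard map can avoid stalling, or VIRGL_TRANSFER_MAP_ERROR when
 * the map cannot (or must not) be performed.
 */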
enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width)) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait && (xfer->base.usage & (PIPE_TRANSFER_DISCARD_RANGE |
                                    PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->u.b);
      } else {
         can_staging = vctx->transfer_uploader &&
                       !vctx->transfer_uploader_in_use;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
                     VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers.  It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}

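/* Create a resource: compute the guest backing-store layout, then ask the
 * winsys to allocate the corresponding host-side storage.  All mip levels
 * start out clean, so no readback is needed before the first write.
 */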
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

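/* Import a resource from an existing winsys handle (e.g. a shared scanout
 * buffer).  Only textures can be imported this way; buffer imports are
 * rejected.
 */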
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

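/* Try to fold a buffer write into an already queued transfer instead of
 * doing a full map/unmap.  Returns true when the data has been copied into
 * the queued transfer, false when the caller must fall back to a regular
 * transfer_map.
 */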
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short-circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}

static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implied by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

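/* Compute the layout of the guest backing store: per-level strides, layer
 * strides and byte offsets, packed level after level.  MSAA resources get no
 * guest storage (total_size == 0), since their contents live only on the
 * host.
 */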
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

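/* Allocate a virgl_transfer from the per-context slab pool and initialize it
 * for the given box, translating the box origin into a byte offset within
 * the guest layout produced by virgl_resource_layout().
 */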
struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_res = NULL;
   trans->copy_src_offset = 0;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

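/* Drop all references held by a transfer and return it to the slab pool. */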
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   pipe_resource_reference(&trans->copy_src_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

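/* Export a resource through a winsys handle for sharing.  Only textures can
 * be exported; the base-level stride is passed along so the importer can
 * interpret the contents.
 */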
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

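/* Mark a level as dirty, so that the next map of that level reads back the
 * host storage.  Buffers track a single dirty bit regardless of level.
 */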
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                                        unsigned *out_stride,
                                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from the transfer uploader to service the transfer. */
void *virgl_transfer_uploader_map(struct virgl_context *vctx,
                                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;

   assert(vctx->transfer_uploader);
   assert(!vctx->transfer_uploader_in_use);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|      ==> size
    *         |---|               ==> align_offset
    *         |------------|      ==> allocation of size + align_offset
    */
   align_offset = vres->u.b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   u_upload_alloc(vctx->transfer_uploader, 0, size + align_offset,
                  VIRGL_MAP_BUFFER_ALIGNMENT,
                  &vtransfer->copy_src_offset,
                  &vtransfer->copy_src_res, &map_addr);
   if (map_addr) {
      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* The pointer returned by u_upload_alloc already has +offset
       * applied. */
      vctx->transfer_uploader_in_use = true;

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

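/* Swap the resource's host storage for a freshly allocated one with the same
 * layout.  This backs the VIRGL_TRANSFER_MAP_REALLOC path chosen in
 * virgl_resource_transfer_prepare(), so that discarded contents can be
 * rewritten without waiting on the old storage; the new hw_res is then
 * rebound to all context bind points.
 */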
bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->u.b;
   unsigned vbind;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->u.b);

   return true;
}