2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 #include "util/format/u_format.h"
24 #include "util/u_inlines.h"
25 #include "util/u_memory.h"
26 #include "util/u_upload_mgr.h"
27 #include "virgl_context.h"
28 #include "virgl_resource.h"
29 #include "virgl_screen.h"
30 #include "virgl_staging_mgr.h"
32 /* A (soft) limit for the amount of memory we want to allow for queued staging
33 * resources. This is used to decide when we should force a flush, in order to
34 * avoid exhausting virtio-gpu memory.
 */
36 #define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)
/* How a transfer map request will be serviced (see
 * virgl_resource_transfer_prepare).
 */
enum virgl_transfer_map_type {
   /* The request cannot be serviced (e.g. direct mapping was requested,
    * or a non-blocking map would have to block).
    */
   VIRGL_TRANSFER_MAP_ERROR = -1,

   /* Map the underlying virgl_hw_res directly. */
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be
    * transferred with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
};
51 /* We need to flush to properly sync the transfer with the current cmdbuf.
52 * But there are cases where the flushing can be skipped:
54 * - synchronization is disabled
55 * - the resource is not referenced by the current cmdbuf
57 static bool virgl_res_needs_flush(struct virgl_context
*vctx
,
58 struct virgl_transfer
*trans
)
60 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
61 struct virgl_resource
*res
= virgl_resource(trans
->base
.resource
);
63 if (trans
->base
.usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)
66 if (!vws
->res_is_referenced(vws
, vctx
->cbuf
, res
->hw_res
))
72 /* We need to read back from the host storage to make sure the guest storage
73 * is up-to-date. But there are cases where the readback can be skipped:
75 * - the content can be discarded
76 * - the host storage is read-only
78 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
79 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
80 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
82 static bool virgl_res_needs_readback(struct virgl_context
*vctx
,
83 struct virgl_resource
*res
,
84 unsigned usage
, unsigned level
)
86 if (usage
& (PIPE_TRANSFER_DISCARD_RANGE
|
87 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
))
90 if (res
->clean_mask
& (1 << level
))
96 static enum virgl_transfer_map_type
97 virgl_resource_transfer_prepare(struct virgl_context
*vctx
,
98 struct virgl_transfer
*xfer
)
100 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
101 struct virgl_winsys
*vws
= vs
->vws
;
102 struct virgl_resource
*res
= virgl_resource(xfer
->base
.resource
);
103 enum virgl_transfer_map_type map_type
= VIRGL_TRANSFER_MAP_HW_RES
;
108 /* there is no way to map the host storage currently */
109 if (xfer
->base
.usage
& PIPE_TRANSFER_MAP_DIRECTLY
)
110 return VIRGL_TRANSFER_MAP_ERROR
;
112 /* We break the logic down into four steps
114 * step 1: determine the required operations independently
115 * step 2: look for chances to skip the operations
116 * step 3: resolve dependencies between the operations
117 * step 4: execute the operations
120 flush
= virgl_res_needs_flush(vctx
, xfer
);
121 readback
= virgl_res_needs_readback(vctx
, res
, xfer
->base
.usage
,
123 /* We need to wait for all cmdbufs, current or previous, that access the
124 * resource to finish unless synchronization is disabled.
126 wait
= !(xfer
->base
.usage
& PIPE_TRANSFER_UNSYNCHRONIZED
);
128 /* When the transfer range consists of only uninitialized data, we can
129 * assume the GPU is not accessing the range and readback is unnecessary.
130 * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
131 * PIPE_TRANSFER_DISCARD_RANGE are set.
133 if (res
->u
.b
.target
== PIPE_BUFFER
&&
134 !util_ranges_intersect(&res
->valid_buffer_range
, xfer
->base
.box
.x
,
135 xfer
->base
.box
.x
+ xfer
->base
.box
.width
) &&
136 likely(!(virgl_debug
& VIRGL_DEBUG_XFER
))) {
142 /* When the resource is busy but its content can be discarded, we can
143 * replace its HW resource or use a staging buffer to avoid waiting.
146 (xfer
->base
.usage
& (PIPE_TRANSFER_DISCARD_RANGE
|
147 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
)) &&
148 likely(!(virgl_debug
& VIRGL_DEBUG_XFER
))) {
149 bool can_realloc
= false;
150 bool can_staging
= false;
152 /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
153 * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
154 * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
155 * otherwise those following unsynchronized transfers may overwrite
158 if (xfer
->base
.usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
159 can_realloc
= virgl_can_rebind_resource(vctx
, &res
->u
.b
);
161 can_staging
= vctx
->supports_staging
;
164 /* discard implies no readback */
167 if (can_realloc
|| can_staging
) {
168 /* Both map types have some costs. Do them only when the resource is
169 * (or will be) busy for real. Otherwise, set wait to false.
171 wait
= (flush
|| vws
->resource_is_busy(vws
, res
->hw_res
));
173 map_type
= (can_realloc
) ?
174 VIRGL_TRANSFER_MAP_REALLOC
:
175 VIRGL_TRANSFER_MAP_STAGING
;
178 /* There is normally no need to flush either, unless the amount of
179 * memory we are using for staging resources starts growing, in
180 * which case we want to flush to keep our memory consumption in
183 flush
= (vctx
->queued_staging_res_size
>
184 VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT
);
189 /* readback has some implications */
191 /* Readback is yet another command and is transparent to the state
192 * trackers. It should be waited for in all cases, including when
193 * PIPE_TRANSFER_UNSYNCHRONIZED is set.
197 /* When the transfer queue has pending writes to this transfer's region,
198 * we have to flush before readback.
200 if (!flush
&& virgl_transfer_queue_is_queued(&vctx
->queue
, xfer
))
205 vctx
->base
.flush(&vctx
->base
, NULL
, 0);
207 /* If we are not allowed to block, and we know that we will have to wait,
208 * either because the resource is busy, or because it will become busy due
209 * to a readback, return early to avoid performing an incomplete
210 * transfer_get. Such an incomplete transfer_get may finish at any time,
211 * during which another unsynchronized map could write to the resource
212 * contents, leaving the contents in an undefined state.
214 if ((xfer
->base
.usage
& PIPE_TRANSFER_DONTBLOCK
) &&
215 (readback
|| (wait
&& vws
->resource_is_busy(vws
, res
->hw_res
))))
216 return VIRGL_TRANSFER_MAP_ERROR
;
219 vws
->transfer_get(vws
, res
->hw_res
, &xfer
->base
.box
, xfer
->base
.stride
,
220 xfer
->l_stride
, xfer
->offset
, xfer
->base
.level
);
224 vws
->resource_wait(vws
, res
->hw_res
);
229 /* Calculate the minimum size of the memory required to service a resource
230 * transfer map. Also return the stride and layer_stride for the corresponding
234 virgl_transfer_map_size(struct virgl_transfer
*vtransfer
,
235 unsigned *out_stride
,
236 unsigned *out_layer_stride
)
238 struct pipe_resource
*pres
= vtransfer
->base
.resource
;
239 struct pipe_box
*box
= &vtransfer
->base
.box
;
241 unsigned layer_stride
;
245 assert(out_layer_stride
);
247 stride
= util_format_get_stride(pres
->format
, box
->width
);
248 layer_stride
= util_format_get_2d_size(pres
->format
, stride
, box
->height
);
250 if (pres
->target
== PIPE_TEXTURE_CUBE
||
251 pres
->target
== PIPE_TEXTURE_CUBE_ARRAY
||
252 pres
->target
== PIPE_TEXTURE_3D
||
253 pres
->target
== PIPE_TEXTURE_2D_ARRAY
) {
254 size
= box
->depth
* layer_stride
;
255 } else if (pres
->target
== PIPE_TEXTURE_1D_ARRAY
) {
256 size
= box
->depth
* stride
;
261 *out_stride
= stride
;
262 *out_layer_stride
= layer_stride
;
267 /* Maps a region from staging to service the transfer. */
269 virgl_staging_map(struct virgl_context
*vctx
,
270 struct virgl_transfer
*vtransfer
)
272 struct virgl_resource
*vres
= virgl_resource(vtransfer
->base
.resource
);
274 unsigned align_offset
;
276 unsigned layer_stride
;
278 bool alloc_succeeded
;
280 assert(vctx
->supports_staging
);
282 size
= virgl_transfer_map_size(vtransfer
, &stride
, &layer_stride
);
284 /* For buffers we need to ensure that the start of the buffer would be
285 * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
286 * actually include it. To achieve this we may need to allocate a slightly
287 * larger range from the upload buffer, and later update the uploader
288 * resource offset and map address to point to the requested x coordinate
292 * |-------|---bbbb|bbbbb--|
293 * |--------| ==> size
294 * |---| ==> align_offset
295 * |------------| ==> allocation of size + align_offset
297 align_offset
= vres
->u
.b
.target
== PIPE_BUFFER
?
298 vtransfer
->base
.box
.x
% VIRGL_MAP_BUFFER_ALIGNMENT
:
302 virgl_staging_alloc(&vctx
->staging
, size
+ align_offset
,
303 VIRGL_MAP_BUFFER_ALIGNMENT
,
304 &vtransfer
->copy_src_offset
,
305 &vtransfer
->copy_src_hw_res
,
307 if (alloc_succeeded
) {
308 /* Update source offset and address to point to the requested x coordinate
309 * if we have an align_offset (see above for more information). */
310 vtransfer
->copy_src_offset
+= align_offset
;
311 map_addr
+= align_offset
;
313 /* Mark as dirty, since we are updating the host side resource
314 * without going through the corresponding guest side resource, and
315 * hence the two will diverge.
317 virgl_resource_dirty(vres
, vtransfer
->base
.level
);
319 /* We are using the minimum required size to hold the contents,
320 * possibly using a layout different from the layout of the resource,
321 * so update the transfer strides accordingly.
323 vtransfer
->base
.stride
= stride
;
324 vtransfer
->base
.layer_stride
= layer_stride
;
326 /* Track the total size of active staging resources. */
327 vctx
->queued_staging_res_size
+= size
+ align_offset
;
334 virgl_resource_realloc(struct virgl_context
*vctx
, struct virgl_resource
*res
)
336 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
337 const struct pipe_resource
*templ
= &res
->u
.b
;
339 struct virgl_hw_res
*hw_res
;
341 vbind
= pipe_to_virgl_bind(vs
, templ
->bind
, templ
->flags
);
342 hw_res
= vs
->vws
->resource_create(vs
->vws
,
352 res
->metadata
.total_size
);
356 vs
->vws
->resource_reference(vs
->vws
, &res
->hw_res
, NULL
);
357 res
->hw_res
= hw_res
;
359 /* We can safely clear the range here, since it will be repopulated in the
360 * following rebind operation, according to the active buffer binds.
362 util_range_set_empty(&res
->valid_buffer_range
);
364 /* count toward the staging resource size limit */
365 vctx
->queued_staging_res_size
+= res
->metadata
.total_size
;
367 virgl_rebind_resource(vctx
, &res
->u
.b
);
373 virgl_resource_transfer_map(struct pipe_context
*ctx
,
374 struct pipe_resource
*resource
,
377 const struct pipe_box
*box
,
378 struct pipe_transfer
**transfer
)
380 struct virgl_context
*vctx
= virgl_context(ctx
);
381 struct virgl_winsys
*vws
= virgl_screen(ctx
->screen
)->vws
;
382 struct virgl_resource
*vres
= virgl_resource(resource
);
383 struct virgl_transfer
*trans
;
384 enum virgl_transfer_map_type map_type
;
387 /* Multisampled resources require resolve before mapping. */
388 assert(resource
->nr_samples
<= 1);
390 trans
= virgl_resource_create_transfer(vctx
, resource
,
391 &vres
->metadata
, level
, usage
, box
);
393 map_type
= virgl_resource_transfer_prepare(vctx
, trans
);
395 case VIRGL_TRANSFER_MAP_REALLOC
:
396 if (!virgl_resource_realloc(vctx
, vres
)) {
400 vws
->resource_reference(vws
, &trans
->hw_res
, vres
->hw_res
);
402 case VIRGL_TRANSFER_MAP_HW_RES
:
403 trans
->hw_res_map
= vws
->resource_map(vws
, vres
->hw_res
);
404 if (trans
->hw_res_map
)
405 map_addr
= trans
->hw_res_map
+ trans
->offset
;
409 case VIRGL_TRANSFER_MAP_STAGING
:
410 map_addr
= virgl_staging_map(vctx
, trans
);
411 /* Copy transfers don't make use of hw_res_map at the moment. */
412 trans
->hw_res_map
= NULL
;
414 case VIRGL_TRANSFER_MAP_ERROR
:
416 trans
->hw_res_map
= NULL
;
422 virgl_resource_destroy_transfer(vctx
, trans
);
426 if (vres
->u
.b
.target
== PIPE_BUFFER
) {
427 /* For the checks below to be able to use 'usage', we assume that
428 * transfer preparation doesn't affect the usage.
430 assert(usage
== trans
->base
.usage
);
432 /* If we are doing a whole resource discard with a hw_res map, the buffer
433 * storage can now be considered unused and we don't care about previous
434 * contents. We can thus mark the storage as uninitialized, but only if
435 * the buffer is not host writable (in which case we can't clear the
436 * valid range, since that would result in missed readbacks in future
437 * transfers). We only do this for VIRGL_TRANSFER_MAP_HW_RES, since for
438 * VIRGL_TRANSFER_MAP_REALLOC we already take care of the buffer range
439 * when reallocating and rebinding, and VIRGL_TRANSFER_MAP_STAGING is not
440 * currently used for whole resource discards.
442 if (map_type
== VIRGL_TRANSFER_MAP_HW_RES
&&
443 (usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) &&
444 (vres
->clean_mask
& 1)) {
445 util_range_set_empty(&vres
->valid_buffer_range
);
448 if (usage
& PIPE_TRANSFER_WRITE
)
449 util_range_add(&vres
->u
.b
, &vres
->valid_buffer_range
, box
->x
, box
->x
+ box
->width
);
452 *transfer
= &trans
->base
;
456 static void virgl_resource_layout(struct pipe_resource
*pt
,
457 struct virgl_resource_metadata
*metadata
,
459 uint32_t winsys_stride
,
460 uint32_t plane_offset
,
463 unsigned level
, nblocksy
;
464 unsigned width
= pt
->width0
;
465 unsigned height
= pt
->height0
;
466 unsigned depth
= pt
->depth0
;
467 unsigned buffer_size
= 0;
469 for (level
= 0; level
<= pt
->last_level
; level
++) {
472 if (pt
->target
== PIPE_TEXTURE_CUBE
)
474 else if (pt
->target
== PIPE_TEXTURE_3D
)
477 slices
= pt
->array_size
;
479 nblocksy
= util_format_get_nblocksy(pt
->format
, height
);
480 metadata
->stride
[level
] = winsys_stride
? winsys_stride
:
481 util_format_get_stride(pt
->format
, width
);
482 metadata
->layer_stride
[level
] = nblocksy
* metadata
->stride
[level
];
483 metadata
->level_offset
[level
] = buffer_size
;
485 buffer_size
+= slices
* metadata
->layer_stride
[level
];
487 width
= u_minify(width
, 1);
488 height
= u_minify(height
, 1);
489 depth
= u_minify(depth
, 1);
492 metadata
->plane
= plane
;
493 metadata
->plane_offset
= plane_offset
;
494 metadata
->modifier
= modifier
;
495 if (pt
->nr_samples
<= 1)
496 metadata
->total_size
= buffer_size
;
497 else /* don't create guest backing store for MSAA */
498 metadata
->total_size
= 0;
501 static struct pipe_resource
*virgl_resource_create(struct pipe_screen
*screen
,
502 const struct pipe_resource
*templ
)
505 struct virgl_screen
*vs
= virgl_screen(screen
);
506 struct virgl_resource
*res
= CALLOC_STRUCT(virgl_resource
);
509 res
->u
.b
.screen
= &vs
->base
;
510 pipe_reference_init(&res
->u
.b
.reference
, 1);
511 vbind
= pipe_to_virgl_bind(vs
, templ
->bind
, templ
->flags
);
512 virgl_resource_layout(&res
->u
.b
, &res
->metadata
, 0, 0, 0, 0);
514 if ((vs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_APP_TWEAK_SUPPORT
) &&
515 vs
->tweak_gles_emulate_bgra
&&
516 (templ
->format
== PIPE_FORMAT_B8G8R8A8_SRGB
||
517 templ
->format
== PIPE_FORMAT_B8G8R8A8_UNORM
||
518 templ
->format
== PIPE_FORMAT_B8G8R8X8_SRGB
||
519 templ
->format
== PIPE_FORMAT_B8G8R8X8_UNORM
)) {
520 vbind
|= VIRGL_BIND_PREFER_EMULATED_BGRA
;
523 res
->hw_res
= vs
->vws
->resource_create(vs
->vws
, templ
->target
,
524 templ
->format
, vbind
,
531 res
->metadata
.total_size
);
537 res
->clean_mask
= (1 << VR_MAX_TEXTURE_2D_LEVELS
) - 1;
539 if (templ
->target
== PIPE_BUFFER
) {
540 util_range_init(&res
->valid_buffer_range
);
541 virgl_buffer_init(res
);
543 virgl_texture_init(res
);
550 static struct pipe_resource
*virgl_resource_from_handle(struct pipe_screen
*screen
,
551 const struct pipe_resource
*templ
,
552 struct winsys_handle
*whandle
,
555 uint32_t winsys_stride
, plane_offset
, plane
;
557 struct virgl_screen
*vs
= virgl_screen(screen
);
558 if (templ
->target
== PIPE_BUFFER
)
561 struct virgl_resource
*res
= CALLOC_STRUCT(virgl_resource
);
563 res
->u
.b
.screen
= &vs
->base
;
564 pipe_reference_init(&res
->u
.b
.reference
, 1);
566 plane
= winsys_stride
= plane_offset
= modifier
= 0;
567 res
->hw_res
= vs
->vws
->resource_create_from_handle(vs
->vws
, whandle
,
573 virgl_resource_layout(&res
->u
.b
, &res
->metadata
, plane
, winsys_stride
,
574 plane_offset
, modifier
);
580 virgl_texture_init(res
);
585 void virgl_init_screen_resource_functions(struct pipe_screen
*screen
)
587 screen
->resource_create
= virgl_resource_create
;
588 screen
->resource_from_handle
= virgl_resource_from_handle
;
589 screen
->resource_get_handle
= u_resource_get_handle_vtbl
;
590 screen
->resource_destroy
= u_resource_destroy_vtbl
;
593 static void virgl_buffer_subdata(struct pipe_context
*pipe
,
594 struct pipe_resource
*resource
,
595 unsigned usage
, unsigned offset
,
596 unsigned size
, const void *data
)
598 struct virgl_context
*vctx
= virgl_context(pipe
);
599 struct virgl_resource
*vbuf
= virgl_resource(resource
);
601 /* We can try virgl_transfer_queue_extend_buffer when there is no
602 * flush/readback/wait required. Based on virgl_resource_transfer_prepare,
603 * the simplest way to make sure that is the case is to check the valid
606 if (!util_ranges_intersect(&vbuf
->valid_buffer_range
,
607 offset
, offset
+ size
) &&
608 likely(!(virgl_debug
& VIRGL_DEBUG_XFER
)) &&
609 virgl_transfer_queue_extend_buffer(&vctx
->queue
,
610 vbuf
->hw_res
, offset
, size
, data
)) {
611 util_range_add(&vbuf
->u
.b
, &vbuf
->valid_buffer_range
, offset
, offset
+ size
);
615 u_default_buffer_subdata(pipe
, resource
, usage
, offset
, size
, data
);
618 void virgl_init_context_resource_functions(struct pipe_context
*ctx
)
620 ctx
->transfer_map
= u_transfer_map_vtbl
;
621 ctx
->transfer_flush_region
= u_transfer_flush_region_vtbl
;
622 ctx
->transfer_unmap
= u_transfer_unmap_vtbl
;
623 ctx
->buffer_subdata
= virgl_buffer_subdata
;
624 ctx
->texture_subdata
= u_default_texture_subdata
;
628 struct virgl_transfer
*
629 virgl_resource_create_transfer(struct virgl_context
*vctx
,
630 struct pipe_resource
*pres
,
631 const struct virgl_resource_metadata
*metadata
,
632 unsigned level
, unsigned usage
,
633 const struct pipe_box
*box
)
635 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
636 struct virgl_transfer
*trans
;
637 enum pipe_format format
= pres
->format
;
638 const unsigned blocksy
= box
->y
/ util_format_get_blockheight(format
);
639 const unsigned blocksx
= box
->x
/ util_format_get_blockwidth(format
);
641 unsigned offset
= metadata
->plane_offset
+ metadata
->level_offset
[level
];
642 if (pres
->target
== PIPE_TEXTURE_CUBE
||
643 pres
->target
== PIPE_TEXTURE_CUBE_ARRAY
||
644 pres
->target
== PIPE_TEXTURE_3D
||
645 pres
->target
== PIPE_TEXTURE_2D_ARRAY
) {
646 offset
+= box
->z
* metadata
->layer_stride
[level
];
648 else if (pres
->target
== PIPE_TEXTURE_1D_ARRAY
) {
649 offset
+= box
->z
* metadata
->stride
[level
];
651 } else if (pres
->target
== PIPE_BUFFER
) {
652 assert(box
->y
== 0 && box
->z
== 0);
657 offset
+= blocksy
* metadata
->stride
[level
];
658 offset
+= blocksx
* util_format_get_blocksize(format
);
660 trans
= slab_alloc(&vctx
->transfer_pool
);
664 /* note that trans is not zero-initialized */
665 trans
->base
.resource
= NULL
;
666 pipe_resource_reference(&trans
->base
.resource
, pres
);
667 trans
->hw_res
= NULL
;
668 vws
->resource_reference(vws
, &trans
->hw_res
, virgl_resource(pres
)->hw_res
);
670 trans
->base
.level
= level
;
671 trans
->base
.usage
= usage
;
672 trans
->base
.box
= *box
;
673 trans
->base
.stride
= metadata
->stride
[level
];
674 trans
->base
.layer_stride
= metadata
->layer_stride
[level
];
675 trans
->offset
= offset
;
676 util_range_init(&trans
->range
);
677 trans
->copy_src_hw_res
= NULL
;
678 trans
->copy_src_offset
= 0;
679 trans
->resolve_transfer
= NULL
;
681 if (trans
->base
.resource
->target
!= PIPE_TEXTURE_3D
&&
682 trans
->base
.resource
->target
!= PIPE_TEXTURE_CUBE
&&
683 trans
->base
.resource
->target
!= PIPE_TEXTURE_1D_ARRAY
&&
684 trans
->base
.resource
->target
!= PIPE_TEXTURE_2D_ARRAY
&&
685 trans
->base
.resource
->target
!= PIPE_TEXTURE_CUBE_ARRAY
)
688 trans
->l_stride
= trans
->base
.layer_stride
;
693 void virgl_resource_destroy_transfer(struct virgl_context
*vctx
,
694 struct virgl_transfer
*trans
)
696 struct virgl_winsys
*vws
= virgl_screen(vctx
->base
.screen
)->vws
;
698 vws
->resource_reference(vws
, &trans
->copy_src_hw_res
, NULL
);
700 util_range_destroy(&trans
->range
);
701 vws
->resource_reference(vws
, &trans
->hw_res
, NULL
);
702 pipe_resource_reference(&trans
->base
.resource
, NULL
);
703 slab_free(&vctx
->transfer_pool
, trans
);
706 void virgl_resource_destroy(struct pipe_screen
*screen
,
707 struct pipe_resource
*resource
)
709 struct virgl_screen
*vs
= virgl_screen(screen
);
710 struct virgl_resource
*res
= virgl_resource(resource
);
712 if (res
->u
.b
.target
== PIPE_BUFFER
)
713 util_range_destroy(&res
->valid_buffer_range
);
715 vs
->vws
->resource_reference(vs
->vws
, &res
->hw_res
, NULL
);
719 bool virgl_resource_get_handle(struct pipe_screen
*screen
,
720 struct pipe_resource
*resource
,
721 struct winsys_handle
*whandle
)
723 struct virgl_screen
*vs
= virgl_screen(screen
);
724 struct virgl_resource
*res
= virgl_resource(resource
);
726 if (res
->u
.b
.target
== PIPE_BUFFER
)
729 return vs
->vws
->resource_get_handle(vs
->vws
, res
->hw_res
,
730 res
->metadata
.stride
[0],
734 void virgl_resource_dirty(struct virgl_resource
*res
, uint32_t level
)
737 if (res
->u
.b
.target
== PIPE_BUFFER
)
738 res
->clean_mask
&= ~1;
740 res
->clean_mask
&= ~(1 << level
);