/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)
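
/* This limit is consulted in virgl_resource_transfer_prepare() below, which
 * forces a flush once vctx->queued_staging_res_size exceeds it.
 */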

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be transferred
    * with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
};

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 *  - synchronization is disabled
 *  - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 *  - the content can be discarded
 *  - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

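   /* A set bit in clean_mask means the guest storage of that level is
    * up-to-date with the host storage, so no readback is needed; see
    * virgl_resource_dirty(), which clears bits when the host storage is
    * updated without going through the guest storage.
    */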
   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);

   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_TRANSFER_DISCARD_RANGE |
                            PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->u.b);
      } else {
         can_staging = vctx->supports_staging;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
               VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback)
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *         |--------|      ==> size
    *      |---|              ==> align_offset
    *      |------------|     ==> allocation of size + align_offset
    */
   align_offset = vres->u.b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;
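   /* For example, assuming VIRGL_MAP_BUFFER_ALIGNMENT is 64: a buffer
    * transfer starting at box.x == 100 gets align_offset == 100 % 64 == 36,
    * so we allocate size + 36 bytes and advance both the copy source offset
    * and the map address by 36 below.
    */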

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return alloc_succeeded ? map_addr : NULL;
}

static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->u.b;
   unsigned vbind;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->u.b);

   return true;
}

static void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans);
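   /* Service the map according to the strategy chosen above: map the
    * (possibly reallocated) hw_res directly, or map a staging range whose
    * contents will later be applied with a copy transfer.
    */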
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      /* fall through */
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->u.b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the buffer
       * storage can now be considered unused and we don't care about previous
       * contents. We can thus mark the storage as uninitialized, but only if
       * the buffer is not host writable (in which case we can't clear the
       * valid range, since that would result in missed readbacks in future
       * transfers). We only do this for VIRGL_TRANSFER_MAP_HW_RES, since for
       * VIRGL_TRANSFER_MAP_REALLOC we already take care of the buffer range
       * when reallocating and rebinding, and VIRGL_TRANSFER_MAP_STAGING is not
       * currently used for whole resource discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_TRANSFER_WRITE)
         util_range_add(&vres->valid_buffer_range, box->x, box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}

static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   virgl_resource_layout(&res->u.b, &res->metadata);
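
   /* When the host supports application tweaks and the gles_emulate_bgra
    * tweak is enabled, hint that the host should prefer an emulated BGRA
    * format for these BGRA/BGRX formats.
    */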
   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
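   /* dummy_trans describes the transfer this subdata call would perform, so
    * the transfer queue can look for a queued transfer it can extend to
    * cover the new region.
    */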
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}

static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;
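
   /* A write covering the whole buffer can discard all previous contents,
    * while a partial write can only discard the range it touches.
    */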
   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;
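
   /* Levels are laid out back to back in guest memory: each level stores
    * `slices` layers contiguously (6 faces for cube maps, the current depth
    * for 3D textures, array_size otherwise), and level_offset accumulates
    * the running total.
    */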
   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);
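
   /* Compute the byte offset of the transfer start within the guest backing
    * store: the level's offset, plus a z/layer offset for layered targets,
    * plus the block-aligned y and x offsets.
    */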
   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_hw_res = NULL;
   trans->copy_src_offset = 0;
   trans->resolve_transfer = NULL;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}