/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 *  - synchronization is disabled
 *  - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date.  But there are cases where the readback can be skipped:
 *
 *  - the content can be discarded
 *  - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant.  PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}
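
/* Prepare the transfer for mapping: flush and/or read back when required,
 * then wait unless the caller allows unsynchronized access.  Returns the
 * map type to use, or VIRGL_TRANSFER_MAP_ERROR when mapping cannot proceed.
 */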
enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);

   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width)) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers.  It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   /* XXX This is incorrect and will be removed.  Consider
    *
    *   glTexImage2D(..., data1);
    *   glDrawArrays();
    *
    *   glTexImage2D(..., data2);
    *
    * readback and flush are both false in the second glTexImage2D call.  The
    * draw call might end up seeing data2.  Same applies to buffers with
    * glBufferSubData.
    */
   wait = flush || readback;

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get.  Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}
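
/* Create a resource: compute the guest storage layout, then ask the winsys
 * to allocate the host-side storage.
 */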
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* a fresh resource is considered clean at every level */
   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}
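
/* Import a resource from a winsys handle.  Buffers cannot be imported. */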
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}
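
/* Fast path for buffer uploads: fold the write into an already queued
 * transfer when possible, avoiding a full map/unmap cycle.  Returns false
 * when the caller must fall back to a regular transfer.
 */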
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}
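
/* pipe_context::buffer_subdata callback.  Writes are promoted to discards
 * where possible so that the transfer code can skip flushes and readbacks.
 */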
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}
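
/* Compute per-level strides, layer strides, and offsets, along with the
 * total size of the guest backing store.
 */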
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}
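
/* Allocate a virgl_transfer for the given level and box, precomputing the
 * byte offset of the first block of the box within the guest storage.
 */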
struct virgl_transfer *
virgl_resource_create_transfer(struct slab_child_pool *pool,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}
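
/* Release a transfer created with virgl_resource_create_transfer. */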
void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}
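
/* Destroy the guest bookkeeping and drop the reference to the host resource. */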
void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}
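
/* Export the resource through a winsys handle.  Buffers cannot be exported. */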
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}
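
/* Mark a level as written by the host, so that later transfers know that a
 * readback is required before mapping.  Buffers collapse all levels into
 * bit 0 of clean_mask.
 */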
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}