virgl: remove an incorrect check in virgl_res_needs_flush
[mesa.git] src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

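/* Decide how a transfer will be mapped and perform whatever flush, readback,
 * and wait the mapping requires. Returns the map type on success, or
 * VIRGL_TRANSFER_MAP_ERROR when the transfer cannot be mapped.
 */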
enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);

   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish, unless synchronization is disabled. Readback, which
    * is yet another command and is transparent to the state trackers, should
    * also be waited for.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) || readback;

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width)) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* XXX This is incorrect. Consider
    *
    * glTexImage2D(..., data1);
    * glDrawArrays();
    * glFlush();
    * glTexImage2D(..., data2);
    *
    * readback and flush are both false in the second glTexImage2D call. The
    * draw call might end up seeing data2. Same applies to buffers with
    * glBufferSubData.
    */
   wait = flush || readback;

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait) {
      /* fail the mapping after flush and readback so that it will succeed in
       * the future
       */
      if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
          vws->resource_is_busy(vws, res->hw_res))
         return VIRGL_TRANSFER_MAP_ERROR;

      vws->resource_wait(vws, res->hw_res);
   }

   return map_type;
}

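/* Create a resource: lay out the guest backing store and ask the winsys to
 * allocate the matching host resource. All levels start out clean, so the
 * first mapping can skip the readback.
 */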
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

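/* Import a resource from a winsys handle (e.g. a buffer shared by another
 * process). Only textures can be imported; buffer sharing is not supported.
 */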
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

/* Attempt to short-circuit the entire process of mapping and unmapping a
 * resource when there is an existing transfer that can be extended.
 * Pessimistically falls back if a flush is required.
 */
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
                                      box->x, box->x + box->width))
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
   util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);

   return true;
}

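/* pipe_context::buffer_subdata implementation. Sub-range writes first try
 * the transfer-extension fast path above; whole-buffer updates and failed
 * attempts fall back to a regular map/memcpy/unmap.
 */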
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implied by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

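/* Compute the layout of the guest backing store: per-level strides and
 * offsets, tightly packed, with all slices of a level stored contiguously.
 * For example, a two-level array texture is laid out as
 *
 *   level 0: slice 0, slice 1, ...
 *   level 1: slice 0, slice 1, ...
 */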
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

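/* Allocate a virgl_transfer from the pool and compute the byte offset of the
 * box's origin within the guest backing store, accounting for the level
 * offset, the layer or depth stride, and the block layout of the format.
 */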
struct virgl_transfer *
virgl_resource_create_transfer(struct slab_child_pool *pool,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}

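/* Export a winsys handle for the resource, e.g. so that another process can
 * import it. Buffers cannot be exported.
 */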
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

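/* Mark a level as dirty: the next transfer that reads from it will require a
 * readback from the host. Buffers only ever use bit 0 of the clean mask.
 */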
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}