virgl: handle DONT_BLOCK and MAP_DIRECTLY
[mesa.git] src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 * - the current cmdbuf has no draw/compute command that accesses the
 *   resource (XXX there are also clear or blit commands)
 * - the transfer is to an undefined region and we can assume the current
 *   cmdbuf has no command that accesses the region (XXX we cannot just check
 *   for overlapping transfers)
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   if (res->clean_mask & (1 << trans->base.level)) {
      /* XXX Consider
       *
       *   glCopyBufferSubData(src, dst, ...);
       *   glBufferSubData(src, ...);
       *
       * at the beginning of a cmdbuf.  glBufferSubData will be incorrectly
       * reordered before glCopyBufferSubData.
       */
      if (vctx->num_draws == 0 && vctx->num_compute == 0)
         return false;

      /* XXX Consider
       *
       *   glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data1);
       *   glFlush();
       *   glDrawArrays(GL_TRIANGLES, 0, 3);
       *   glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data2);
       *   glDrawArrays(GL_TRIANGLES, 0, 3);
       *
       * Both draws will see data2.
       */
      if (!virgl_transfer_queue_is_queued(&vctx->queue, trans))
         return false;
   }

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date.  But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
 * PIPE_TRANSFER_READ becomes irrelevant.  PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

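/* Prepare a transfer for mapping.  This flushes the current cmdbuf and/or
 * reads the host storage back into the guest storage when needed, and it
 * rejects PIPE_TRANSFER_MAP_DIRECTLY (the host storage cannot be mapped) as
 * well as PIPE_TRANSFER_DONTBLOCK mappings that would have to wait on a busy
 * resource.  Returns how the transfer should be mapped, or
 * VIRGL_TRANSFER_MAP_ERROR when the mapping should fail.
 */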
enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is currently no way to map the host storage directly */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);

   /* XXX This is incorrect.  Consider
    *
    *   glTexImage2D(..., data1);
    *   glDrawArrays();
    *   glFlush();
    *   glTexImage2D(..., data2);
    *
    * readback and flush are both false in the second glTexImage2D call.  The
    * draw call might end up seeing data2.  Same applies to buffers with
    * glBufferSubData.
    */
   wait = flush || readback;

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait) {
      /* fail the mapping only after the flush and readback have been issued,
       * so that a future attempt can succeed
       */
      if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
          vws->resource_is_busy(vws, res->hw_res))
         return VIRGL_TRANSFER_MAP_ERROR;

      vws->resource_wait(vws, res->hw_res);
   }

   return map_type;
}

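/* Create a new resource: compute the guest backing-store layout and ask the
 * winsys to create the corresponding host resource.  All levels start out
 * clean.
 */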
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER)
      virgl_buffer_init(res);
   else
      virgl_texture_init(res);

   return &res->u.b;
}

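/* Import a resource created elsewhere from a winsys handle.  Only textures
 * are supported; PIPE_BUFFER imports are rejected.
 */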
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

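/* Try to extend a previously queued transfer instead of mapping and
 * unmapping the buffer again.  Returns false when the fast path cannot be
 * taken (a flush would be required, or no compatible queued transfer
 * exists), in which case the caller falls back to a regular transfer_map.
 */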
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempt to short-circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically fall back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush)
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);

   return true;
}

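/* Optimized pipe_context::buffer_subdata callback: add discard flags based
 * on the upload range, try the transfer-extend fast path for ranged
 * discards, and otherwise fall back to a plain map/memcpy/unmap.
 */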
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implied by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

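/* Compute per-level strides and offsets for the guest backing store.  Levels
 * are laid out consecutively, with all slices of a level stored contiguously.
 * As a rough example (assuming the format helpers add no extra row
 * alignment), a 256x256 PIPE_FORMAT_B8G8R8A8_UNORM texture with two mip
 * levels gets stride[0] = 1024, layer_stride[0] = 256 KiB,
 * level_offset[1] = 256 KiB, layer_stride[1] = 64 KiB, and a total_size of
 * 320 KiB.  MSAA resources get no guest backing store at all.
 */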
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

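/* Allocate a virgl_transfer from the slab pool and compute the byte offset
 * of the transfer box within the guest backing store: level offset, then
 * layer/slice, then block row and column.  l_stride is only set for targets
 * whose boxes can span multiple layers or depth slices.
 */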
struct virgl_transfer *
virgl_resource_create_transfer(struct slab_child_pool *pool,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);
   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}

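/* Buffers cannot be exported; only textures are shared through the winsys. */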
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

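/* Clear the clean bit for a level so that later transfers treat the guest
 * backing store as stale (see virgl_res_needs_readback).  Buffers always use
 * bit 0, regardless of the level argument.
 */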
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}