virgl: honor DISCARD_WHOLE_RESOURCE in virgl_res_needs_readback
[mesa.git] / src / gallium / drivers / virgl / virgl_resource.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "util/u_format.h"
24 #include "util/u_inlines.h"
25 #include "util/u_memory.h"
26 #include "virgl_context.h"
27 #include "virgl_resource.h"
28 #include "virgl_screen.h"
29
30 /* We need to flush to properly sync the transfer with the current cmdbuf.
31 * But there are cases where the flushing can be skipped:
32 *
33 * - synchronization is disabled
34 * - the resource is not referenced by the current cmdbuf
35 * - the current cmdbuf has no draw/compute command that accesses the
36 * resource (XXX there are also clear or blit commands)
37 * - the transfer is to an undefined region and we can assume the current
38 * cmdbuf has no command that accesses the region (XXX we cannot just check
39 * for overlapping transfers)
40 */
41 bool virgl_res_needs_flush(struct virgl_context *vctx,
42 struct virgl_transfer *trans)
43 {
44 struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
45 struct virgl_resource *res = virgl_resource(trans->base.resource);
46
47 if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
48 return false;
49
50 if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
51 return false;
52
53 if (res->clean_mask & (1 << trans->base.level)) {
54 /* XXX Consider
55 *
56 * glCopyBufferSubData(src, dst, ...);
57 * glBufferSubData(src, ...);
58 *
59 * at the beginning of a cmdbuf. glBufferSubData will be incorrectly
60 * reordered before glCopyBufferSubData.
61 */
62 if (vctx->num_draws == 0 && vctx->num_compute == 0)
63 return false;
64
65 /* XXX Consider
66 *
67 * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data1);
68 * glFlush();
69 * glDrawArrays(GL_TRIANGLES, 0, 3);
70 * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data2);
71 * glDrawArrays(GL_TRIANGLES, 0, 3);
72 *
73 * Both draws will see data2.
74 */
75 if (!virgl_transfer_queue_is_queued(&vctx->queue, trans))
76 return false;
77 }
78
79 return true;
80 }
81
82 /* We need to read back from the host storage to make sure the guest storage
83 * is up-to-date. But there are cases where the readback can be skipped:
84 *
85 * - the content can be discarded
86 * - the host storage is read-only
87 *
88 * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
89 * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
90 * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
91 */
92 bool virgl_res_needs_readback(struct virgl_context *vctx,
93 struct virgl_resource *res,
94 unsigned usage, unsigned level)
95 {
96 if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
97 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
98 return false;
99
100 if (res->clean_mask & (1 << level))
101 return false;
102
103 return true;
104 }
105
106 static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
107 const struct pipe_resource *templ)
108 {
109 unsigned vbind;
110 struct virgl_screen *vs = virgl_screen(screen);
111 struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
112
113 res->u.b = *templ;
114 res->u.b.screen = &vs->base;
115 pipe_reference_init(&res->u.b.reference, 1);
116 vbind = pipe_to_virgl_bind(vs, templ->bind);
117 virgl_resource_layout(&res->u.b, &res->metadata);
118 res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
119 templ->format, vbind,
120 templ->width0,
121 templ->height0,
122 templ->depth0,
123 templ->array_size,
124 templ->last_level,
125 templ->nr_samples,
126 res->metadata.total_size);
127 if (!res->hw_res) {
128 FREE(res);
129 return NULL;
130 }
131
132 res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;
133
134 if (templ->target == PIPE_BUFFER)
135 virgl_buffer_init(res);
136 else
137 virgl_texture_init(res);
138
139 return &res->u.b;
140
141 }
142
143 static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
144 const struct pipe_resource *templ,
145 struct winsys_handle *whandle,
146 unsigned usage)
147 {
148 struct virgl_screen *vs = virgl_screen(screen);
149 if (templ->target == PIPE_BUFFER)
150 return NULL;
151
152 struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
153 res->u.b = *templ;
154 res->u.b.screen = &vs->base;
155 pipe_reference_init(&res->u.b.reference, 1);
156
157 res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
158 if (!res->hw_res) {
159 FREE(res);
160 return NULL;
161 }
162
163 virgl_texture_init(res);
164
165 return &res->u.b;
166 }
167
/* Plug the virgl resource entry points into the pipe_screen vtable.
 * Export and destruction go through the u_resource dispatch helpers, which
 * forward to per-resource vtables set up by virgl_buffer/texture_init.
 */
void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}
175
176 static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
177 struct pipe_resource *resource,
178 unsigned usage,
179 const struct pipe_box *box,
180 const void *data)
181 {
182 struct virgl_context *vctx = virgl_context(ctx);
183 struct virgl_resource *vbuf = virgl_resource(resource);
184 struct virgl_transfer dummy_trans = { 0 };
185 bool flush;
186 struct virgl_transfer *queued;
187
188 /*
189 * Attempts to short circuit the entire process of mapping and unmapping
190 * a resource if there is an existing transfer that can be extended.
191 * Pessimestically falls back if a flush is required.
192 */
193 dummy_trans.base.resource = resource;
194 dummy_trans.base.usage = usage;
195 dummy_trans.base.box = *box;
196 dummy_trans.base.stride = vbuf->metadata.stride[0];
197 dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
198 dummy_trans.offset = box->x;
199
200 flush = virgl_res_needs_flush(vctx, &dummy_trans);
201 if (flush)
202 return false;
203
204 queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
205 if (!queued || !queued->hw_res_map)
206 return false;
207
208 memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
209
210 return true;
211 }
212
213 static void virgl_buffer_subdata(struct pipe_context *pipe,
214 struct pipe_resource *resource,
215 unsigned usage, unsigned offset,
216 unsigned size, const void *data)
217 {
218 struct pipe_transfer *transfer;
219 uint8_t *map;
220 struct pipe_box box;
221
222 assert(!(usage & PIPE_TRANSFER_READ));
223
224 /* the write flag is implicit by the nature of buffer_subdata */
225 usage |= PIPE_TRANSFER_WRITE;
226
227 if (offset == 0 && size == resource->width0)
228 usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
229 else
230 usage |= PIPE_TRANSFER_DISCARD_RANGE;
231
232 u_box_1d(offset, size, &box);
233
234 if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
235 virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
236 return;
237
238 map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
239 if (map) {
240 memcpy(map, data, size);
241 pipe_transfer_unmap(pipe, transfer);
242 }
243 }
244
/* Plug the virgl resource entry points into the pipe_context vtable.
 * Mapping goes through the u_transfer dispatch helpers; only buffer_subdata
 * has a virgl-specific fast path, texture_subdata uses the default.
 */
void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}
253
254 void virgl_resource_layout(struct pipe_resource *pt,
255 struct virgl_resource_metadata *metadata)
256 {
257 unsigned level, nblocksy;
258 unsigned width = pt->width0;
259 unsigned height = pt->height0;
260 unsigned depth = pt->depth0;
261 unsigned buffer_size = 0;
262
263 for (level = 0; level <= pt->last_level; level++) {
264 unsigned slices;
265
266 if (pt->target == PIPE_TEXTURE_CUBE)
267 slices = 6;
268 else if (pt->target == PIPE_TEXTURE_3D)
269 slices = depth;
270 else
271 slices = pt->array_size;
272
273 nblocksy = util_format_get_nblocksy(pt->format, height);
274 metadata->stride[level] = util_format_get_stride(pt->format, width);
275 metadata->layer_stride[level] = nblocksy * metadata->stride[level];
276 metadata->level_offset[level] = buffer_size;
277
278 buffer_size += slices * metadata->layer_stride[level];
279
280 width = u_minify(width, 1);
281 height = u_minify(height, 1);
282 depth = u_minify(depth, 1);
283 }
284
285 if (pt->nr_samples <= 1)
286 metadata->total_size = buffer_size;
287 else /* don't create guest backing store for MSAA */
288 metadata->total_size = 0;
289 }
290
291 struct virgl_transfer *
292 virgl_resource_create_transfer(struct slab_child_pool *pool,
293 struct pipe_resource *pres,
294 const struct virgl_resource_metadata *metadata,
295 unsigned level, unsigned usage,
296 const struct pipe_box *box)
297 {
298 struct virgl_transfer *trans;
299 enum pipe_format format = pres->format;
300 const unsigned blocksy = box->y / util_format_get_blockheight(format);
301 const unsigned blocksx = box->x / util_format_get_blockwidth(format);
302
303 unsigned offset = metadata->level_offset[level];
304 if (pres->target == PIPE_TEXTURE_CUBE ||
305 pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
306 pres->target == PIPE_TEXTURE_3D ||
307 pres->target == PIPE_TEXTURE_2D_ARRAY) {
308 offset += box->z * metadata->layer_stride[level];
309 }
310 else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
311 offset += box->z * metadata->stride[level];
312 assert(box->y == 0);
313 } else if (pres->target == PIPE_BUFFER) {
314 assert(box->y == 0 && box->z == 0);
315 } else {
316 assert(box->z == 0);
317 }
318
319 offset += blocksy * metadata->stride[level];
320 offset += blocksx * util_format_get_blocksize(format);
321
322 trans = slab_alloc(pool);
323 if (!trans)
324 return NULL;
325
326 trans->base.resource = pres;
327 trans->base.level = level;
328 trans->base.usage = usage;
329 trans->base.box = *box;
330 trans->base.stride = metadata->stride[level];
331 trans->base.layer_stride = metadata->layer_stride[level];
332 trans->offset = offset;
333 util_range_init(&trans->range);
334
335 if (trans->base.resource->target != PIPE_TEXTURE_3D &&
336 trans->base.resource->target != PIPE_TEXTURE_CUBE &&
337 trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
338 trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
339 trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
340 trans->l_stride = 0;
341 else
342 trans->l_stride = trans->base.layer_stride;
343
344 return trans;
345 }
346
/* Release a transfer created by virgl_resource_create_transfer: tear down
 * its dirty-range tracking, then return it to the slab pool.
 */
void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}
353
/* Final destruction of a resource (refcount reached zero): drop the winsys
 * reference on the host resource and free the guest-side wrapper.
 */
void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);
   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}
362
363 boolean virgl_resource_get_handle(struct pipe_screen *screen,
364 struct pipe_resource *resource,
365 struct winsys_handle *whandle)
366 {
367 struct virgl_screen *vs = virgl_screen(screen);
368 struct virgl_resource *res = virgl_resource(resource);
369
370 if (res->u.b.target == PIPE_BUFFER)
371 return FALSE;
372
373 return vs->vws->resource_get_handle(vs->vws, res->hw_res,
374 res->metadata.stride[0],
375 whandle);
376 }
377
378 void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
379 {
380 if (res) {
381 if (res->u.b.target == PIPE_BUFFER)
382 res->clean_mask &= ~1;
383 else
384 res->clean_mask &= ~(1 << level);
385 }
386 }