virgl: add ability to do finer grain dirty tracking
[mesa.git] / src / gallium / drivers / virgl / virgl_resource.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "util/u_format.h"
24 #include "util/u_inlines.h"
25 #include "util/u_memory.h"
26 #include "virgl_context.h"
27 #include "virgl_resource.h"
28 #include "virgl_screen.h"
29
30 bool virgl_res_needs_flush_wait(struct virgl_context *vctx,
31 struct virgl_resource *res,
32 unsigned usage)
33 {
34 struct virgl_screen *vs = virgl_screen(vctx->base.screen);
35
36 if ((!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) && vs->vws->res_is_referenced(vs->vws, vctx->cbuf, res->hw_res)) {
37 return true;
38 }
39 return false;
40 }
41
42 bool virgl_res_needs_readback(struct virgl_context *vctx,
43 struct virgl_resource *res,
44 unsigned usage)
45 {
46 bool readback = true;
47 if (res->clean[0])
48 readback = false;
49 else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
50 readback = false;
51 else if ((usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) ==
52 (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT))
53 readback = false;
54 return readback;
55 }
56
57 static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
58 const struct pipe_resource *templ)
59 {
60 unsigned vbind;
61 struct virgl_screen *vs = virgl_screen(screen);
62 struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
63
64 res->u.b = *templ;
65 res->u.b.screen = &vs->base;
66 pipe_reference_init(&res->u.b.reference, 1);
67 vbind = pipe_to_virgl_bind(templ->bind);
68 virgl_resource_layout(&res->u.b, &res->metadata);
69 res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
70 templ->format, vbind,
71 templ->width0,
72 templ->height0,
73 templ->depth0,
74 templ->array_size,
75 templ->last_level,
76 templ->nr_samples,
77 res->metadata.total_size);
78 if (!res->hw_res) {
79 FREE(res);
80 return NULL;
81 }
82
83 for (uint32_t i = 0; i < VR_MAX_TEXTURE_2D_LEVELS; i++)
84 res->clean[i] = TRUE;
85
86 if (templ->target == PIPE_BUFFER)
87 virgl_buffer_init(res);
88 else
89 virgl_texture_init(res);
90
91 return &res->u.b;
92
93 }
94
95 static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
96 const struct pipe_resource *templ,
97 struct winsys_handle *whandle,
98 unsigned usage)
99 {
100 struct virgl_screen *vs = virgl_screen(screen);
101 if (templ->target == PIPE_BUFFER)
102 return NULL;
103
104 struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
105 res->u.b = *templ;
106 res->u.b.screen = &vs->base;
107 pipe_reference_init(&res->u.b.reference, 1);
108
109 res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
110 if (!res->hw_res) {
111 FREE(res);
112 return NULL;
113 }
114
115 virgl_texture_init(res);
116
117 return &res->u.b;
118 }
119
120 void virgl_init_screen_resource_functions(struct pipe_screen *screen)
121 {
122 screen->resource_create = virgl_resource_create;
123 screen->resource_from_handle = virgl_resource_from_handle;
124 screen->resource_get_handle = u_resource_get_handle_vtbl;
125 screen->resource_destroy = u_resource_destroy_vtbl;
126 }
127
128 static void virgl_buffer_subdata(struct pipe_context *pipe,
129 struct pipe_resource *resource,
130 unsigned usage, unsigned offset,
131 unsigned size, const void *data)
132 {
133 struct pipe_box box;
134
135 if (offset == 0 && size == resource->width0)
136 usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
137 else
138 usage |= PIPE_TRANSFER_DISCARD_RANGE;
139
140 u_box_1d(offset, size, &box);
141
142 if (size >= (VIRGL_MAX_CMDBUF_DWORDS * 4))
143 u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
144 else
145 virgl_transfer_inline_write(pipe, resource, 0, usage, &box, data, 0, 0);
146 }
147
148 void virgl_init_context_resource_functions(struct pipe_context *ctx)
149 {
150 ctx->transfer_map = u_transfer_map_vtbl;
151 ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
152 ctx->transfer_unmap = u_transfer_unmap_vtbl;
153 ctx->buffer_subdata = virgl_buffer_subdata;
154 ctx->texture_subdata = u_default_texture_subdata;
155 }
156
157 void virgl_resource_layout(struct pipe_resource *pt,
158 struct virgl_resource_metadata *metadata)
159 {
160 unsigned level, nblocksy;
161 unsigned width = pt->width0;
162 unsigned height = pt->height0;
163 unsigned depth = pt->depth0;
164 unsigned buffer_size = 0;
165
166 for (level = 0; level <= pt->last_level; level++) {
167 unsigned slices;
168
169 if (pt->target == PIPE_TEXTURE_CUBE)
170 slices = 6;
171 else if (pt->target == PIPE_TEXTURE_3D)
172 slices = depth;
173 else
174 slices = pt->array_size;
175
176 nblocksy = util_format_get_nblocksy(pt->format, height);
177 metadata->stride[level] = util_format_get_stride(pt->format, width);
178 metadata->layer_stride[level] = nblocksy * metadata->stride[level];
179 metadata->level_offset[level] = buffer_size;
180
181 buffer_size += slices * metadata->layer_stride[level];
182
183 width = u_minify(width, 1);
184 height = u_minify(height, 1);
185 depth = u_minify(depth, 1);
186 }
187
188 if (pt->nr_samples <= 1)
189 metadata->total_size = buffer_size;
190 else /* don't create guest backing store for MSAA */
191 metadata->total_size = 0;
192 }
193
194 struct virgl_transfer *
195 virgl_resource_create_transfer(struct pipe_context *ctx,
196 struct pipe_resource *pres,
197 const struct virgl_resource_metadata *metadata,
198 unsigned level, unsigned usage,
199 const struct pipe_box *box)
200 {
201 struct virgl_transfer *trans;
202 enum pipe_format format = pres->format;
203 struct virgl_context *vctx = virgl_context(ctx);
204 const unsigned blocksy = box->y / util_format_get_blockheight(format);
205 const unsigned blocksx = box->x / util_format_get_blockwidth(format);
206
207 unsigned offset = metadata->level_offset[level];
208 if (pres->target == PIPE_TEXTURE_CUBE ||
209 pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
210 pres->target == PIPE_TEXTURE_3D ||
211 pres->target == PIPE_TEXTURE_2D_ARRAY) {
212 offset += box->z * metadata->layer_stride[level];
213 }
214 else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
215 offset += box->z * metadata->stride[level];
216 assert(box->y == 0);
217 } else if (pres->target == PIPE_BUFFER) {
218 assert(box->y == 0 && box->z == 0);
219 } else {
220 assert(box->z == 0);
221 }
222
223 offset += blocksy * metadata->stride[level];
224 offset += blocksx * util_format_get_blocksize(format);
225
226 trans = slab_alloc(&vctx->transfer_pool);
227 if (!trans)
228 return NULL;
229
230 trans->base.resource = pres;
231 trans->base.level = level;
232 trans->base.usage = usage;
233 trans->base.box = *box;
234 trans->base.stride = metadata->stride[level];
235 trans->base.layer_stride = metadata->layer_stride[level];
236 trans->offset = offset;
237 util_range_init(&trans->range);
238
239 if (trans->base.resource->target != PIPE_TEXTURE_3D &&
240 trans->base.resource->target != PIPE_TEXTURE_CUBE &&
241 trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
242 trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
243 trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
244 trans->l_stride = 0;
245 else
246 trans->l_stride = trans->base.layer_stride;
247
248 return trans;
249 }
250
/* Tear down a transfer created by virgl_resource_create_transfer.
 * The dirty-range tracking must be destroyed before slab_free, which
 * returns (and invalidates) the transfer itself.
 */
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(&vctx->transfer_pool, trans);
}
257
/* Screen vtbl entry: drop the winsys reference on the host-side object,
 * then free the guest wrapper.
 */
void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);
   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}
266
267 boolean virgl_resource_get_handle(struct pipe_screen *screen,
268 struct pipe_resource *resource,
269 struct winsys_handle *whandle)
270 {
271 struct virgl_screen *vs = virgl_screen(screen);
272 struct virgl_resource *res = virgl_resource(resource);
273
274 if (res->u.b.target == PIPE_BUFFER)
275 return FALSE;
276
277 return vs->vws->resource_get_handle(vs->vws, res->hw_res,
278 res->metadata.stride[0],
279 whandle);
280 }