/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

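/* Returns true if the command buffer must be flushed before mapping this
 * transfer.  No flush is needed for unsynchronized mappings, when the winsys
 * says the resource is not referenced by the current command buffer, or when
 * the mapped level is still clean and either no draw/compute work has been
 * recorded or the transfer is not already queued. */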
bool virgl_res_needs_flush(struct virgl_context *vctx,
                           struct virgl_transfer *trans)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;
   if (!vs->vws->res_is_referenced(vs->vws, vctx->cbuf, res->hw_res))
      return false;
   if (res->clean_mask & (1 << trans->base.level)) {
      if (vctx->num_draws == 0 && vctx->num_compute == 0)
         return false;
      if (!virgl_transfer_queue_is_queued(&vctx->queue, trans))
         return false;
   }

   return true;
}

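/* Returns true if the host resource contents must be read back before the
 * guest can map it.  Readback is skipped when the level is still clean, when
 * the caller discards the mapped range, or when the caller maps with
 * WRITE | FLUSH_EXPLICIT usage and so will flush the written subrange
 * explicitly. */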
bool virgl_res_needs_readback(struct virgl_context *vctx,
                              struct virgl_resource *res,
                              unsigned usage, unsigned level)
{
   bool readback = true;
   if (res->clean_mask & (1 << level))
      readback = false;
   else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
      readback = false;
   else if ((usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) ==
            (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT))
      readback = false;
   return readback;
}

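/* pipe_screen::resource_create handler: computes the guest-side layout,
 * creates the matching host resource through the winsys, and installs the
 * buffer or texture vtable depending on the resource target. */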
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   if (!res)
      return NULL;

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   virgl_resource_layout(&res->u.b, &res->metadata);
   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* All levels start out clean. */
   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER)
      virgl_buffer_init(res);
   else
      virgl_texture_init(res);

   return &res->u.b;
}

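/* pipe_screen::resource_from_handle handler: wraps an imported winsys handle
 * in a virgl_resource.  Only textures can be imported; buffer imports are
 * rejected. */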
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   if (!res)
      return NULL;

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

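/* Returns true if the write was folded into an already-queued transfer;
 * false means the caller must fall back to a regular transfer_map path. */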
static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
                                         struct pipe_resource *resource,
                                         unsigned usage,
                                         const struct pipe_box *box,
                                         const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vbuf = virgl_resource(resource);
   struct virgl_transfer dummy_trans = { 0 };
   bool flush;
   struct virgl_transfer *queued;

   /*
    * Attempts to short-circuit the entire process of mapping and unmapping
    * a resource if there is an existing transfer that can be extended.
    * Pessimistically falls back if a flush is required.
    */
   dummy_trans.base.resource = resource;
   dummy_trans.base.usage = usage;
   dummy_trans.base.box = *box;
   dummy_trans.base.stride = vbuf->metadata.stride[0];
   dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
   dummy_trans.offset = box->x;

   flush = virgl_res_needs_flush(vctx, &dummy_trans);
   if (flush)
      return false;

   queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
   if (!queued || !queued->hw_res_map)
      return false;

   memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);

   return true;
}

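/* pipe_context::buffer_subdata handler.  A write covering the whole buffer
 * may discard the entire resource, a partial write only the written range;
 * the discard-range path first tries virgl_buffer_transfer_extend to avoid
 * a full map/unmap cycle. */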
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_transfer *transfer;
   uint8_t *map;
   struct pipe_box box;

   assert(!(usage & PIPE_TRANSFER_READ));

   /* the write flag is implicit by the nature of buffer_subdata */
   usage |= PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
      return;

   map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
   if (map) {
      memcpy(map, data, size);
      pipe_transfer_unmap(pipe, transfer);
   }
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

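/* Fills in per-level strides and offsets for the guest backing store by
 * walking the mip chain and minifying the dimensions at each step.  MSAA
 * resources get total_size = 0, since no guest backing store is created
 * for them. */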
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

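/* Allocates a virgl_transfer from the slab pool and computes the byte
 * offset of the requested box within the guest backing store: the level
 * offset, plus a layer/slice term for layered targets, plus the
 * block-aligned x/y offset within the layer. */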
struct virgl_transfer *
virgl_resource_create_transfer(struct slab_child_pool *pool,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   /* Only layered targets carry a layer stride into the host transfer;
    * everything else transfers a single layer. */
   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

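/* Counterpart of virgl_resource_create_transfer: releases the range tracker
 * and returns the transfer to its slab pool. */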
void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);
   vs->vws->resource_unref(vs->vws, res->hw_res);
   FREE(res);
}

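/* Exports the winsys handle backing a texture.  Buffer sharing is not
 * supported, so buffers return FALSE. */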
boolean virgl_resource_get_handle(struct pipe_screen *screen,
                                  struct pipe_resource *resource,
                                  struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return FALSE;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

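/* Marks a level as dirty in the clean mask.  Buffers track only a single
 * level, so bit 0 is always cleared for them. */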
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}