virgl: consolidate transfer code
src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"

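/* Returns true when the caller must flush (and wait) before mapping: the
 * mapping is synchronized and the resource is still referenced by the
 * currently recorded command buffer. */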
bool virgl_res_needs_flush_wait(struct virgl_context *vctx,
                                struct virgl_resource *res,
                                unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if ((!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) &&
       vs->vws->res_is_referenced(vs->vws, vctx->cbuf, res->hw_res)) {
      return true;
   }
   return false;
}

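/* Returns true when the resource contents must be read back from the host
 * before mapping.  No readback is needed if the resource is still clean, if
 * the mapped range is being discarded, or for write mappings that will flush
 * explicitly. */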
bool virgl_res_needs_readback(struct virgl_context *vctx,
                              struct virgl_resource *res,
                              unsigned usage)
{
   bool readback = true;
   if (res->clean)
      readback = false;
   else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
      readback = false;
   else if ((usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) ==
            (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT))
      readback = false;
   return readback;
}

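/* Screen hook: dispatch resource creation to the buffer or texture path
 * depending on the template's target. */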
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return virgl_buffer_create(vs, templ);
   else
      return virgl_texture_create(vs, templ);
}

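/* Screen hook: import a resource from a winsys handle.  Only textures are
 * supported; importing buffers fails. */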
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;
   else
      return virgl_texture_from_handle(vs, templ, whandle);
}

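/* Plug the resource entry points into the pipe_screen vtable. */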
void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

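/* Buffer upload path.  Uploads covering the whole buffer may discard it
 * outright, otherwise only the written range is discarded.  Uploads at least
 * as large as a whole command buffer go through u_default_buffer_subdata();
 * smaller ones are written inline into the command stream. */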
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct pipe_box box;

   if (offset == 0 && size == resource->width0)
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   else
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);

   if (size >= (VIRGL_MAX_CMDBUF_DWORDS * 4))
      u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
   else
      virgl_transfer_inline_write(pipe, resource, 0, usage, &box, data, 0, 0);
}

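/* Plug the transfer and subdata entry points into the pipe_context vtable. */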
void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

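/* Compute the per-level strides, layer strides and offsets of a resource's
 * guest backing store, along with its total size.  Multisampled resources
 * get no guest backing store (total_size is 0). */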
void virgl_resource_layout(struct pipe_resource *pt,
                           struct virgl_resource_metadata *metadata)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

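/* Allocate a transfer from the context's slab pool and compute the byte
 * offset of the requested box within the guest backing store.  l_stride is
 * only kept for targets that actually have layers or depth slices. */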
struct virgl_transfer *
virgl_resource_create_transfer(struct pipe_context *ctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   struct virgl_context *vctx = virgl_context(ctx);
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
   }
   else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   trans->base.resource = pres;
   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

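/* Release a transfer previously allocated from the context's slab pool. */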
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   util_range_destroy(&trans->range);
   slab_free(&vctx->transfer_pool, trans);
}