virgl: Store mapped hw resource with transfer object.
mesa.git: src/gallium/drivers/virgl/virgl_transfer_queue.c
/*
 * Copyright 2018 Chromium.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/u_box.h"
#include "util/u_inlines.h"

#include "virgl_protocol.h"
#include "virgl_screen.h"
#include "virgl_encode.h"
#include "virgl_transfer_queue.h"

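/*
 * Transfer queue for batching TRANSFER_TO_HOST uploads. Newly queued
 * transfers sit on PENDING_LIST. When a dedicated transfer command buffer is
 * available they are encoded with virgl_encode_transfer() and parked on
 * COMPLETED_LIST until they are retired; otherwise they are flushed directly
 * through the winsys transfer_put path.
 */
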
struct list_action_args
{
   void *data;
   struct virgl_transfer *queued;
   struct virgl_transfer *current;
};

typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);

struct list_iteration_args
{
   void *data;
   list_action_t action;
   compare_transfers_t compare;
   struct virgl_transfer *current;
   enum virgl_transfer_queue_lists type;
};

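/* Compare callback: true if both transfers target the same resource and
 * their 2D boxes intersect.
 */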
static bool transfers_intersect(struct virgl_transfer *queued,
                                struct virgl_transfer *current)
{
   boolean tmp;
   struct pipe_resource *queued_res = queued->base.resource;
   struct pipe_resource *current_res = current->base.resource;

   if (queued_res != current_res)
      return false;

   tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
   return (tmp == TRUE);
}

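/* Stricter overlap check used by virgl_transfer_queue_is_queued(): transfers
 * on different mip levels never overlap, while differing z/depth is
 * conservatively reported as an overlap.
 */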
static bool transfers_overlap(struct virgl_transfer *queued,
                              struct virgl_transfer *current)
{
   boolean tmp;
   struct pipe_resource *queued_res = queued->base.resource;
   struct pipe_resource *current_res = current->base.resource;

   if (queued_res != current_res)
      return false;

   if (queued->base.level != current->base.level)
      return false;

   if (queued->base.box.z != current->base.box.z)
      return true;

   if (queued->base.box.depth != 1 || current->base.box.depth != 1)
      return true;

   /*
    * Special case for adjacent buffer ranges, e.g. boxes with
    * [x: 0, width: 1] and [x: 1, width: 1]: the intersection test below
    * considers ranges that merely touch to be intersecting, so treat them
    * as non-overlapping here.
    */
   if (queued_res->target == PIPE_BUFFER) {
      if (queued->base.box.x + queued->base.box.width == current->base.box.x)
         return false;

      if (current->base.box.x + current->base.box.width == queued->base.box.x)
         return false;
   }

   tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
   return (tmp == TRUE);
}

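/* List action: set the bool pointed to by args->data, used to report that a
 * matching transfer was found.
 */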
static void set_true(UNUSED struct virgl_transfer_queue *queue,
                     struct list_action_args *args)
{
   bool *val = args->data;
   *val = true;
}

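/* List action: unlink the queued transfer, drop its resource reference and
 * return it to the transfer pool.
 */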
static void remove_transfer(struct virgl_transfer_queue *queue,
                            struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct pipe_resource *pres = queued->base.resource;
   list_del(&queued->queue_link);
   pipe_resource_reference(&pres, NULL);
   virgl_resource_destroy_transfer(queue->pool, queued);
}

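/* List action: merge a queued buffer transfer into the transfer currently
 * being unmapped by growing the latter's box, then drop the queued one and
 * reclaim its command-stream dwords.
 */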
static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
                                      struct list_action_args *args)
{
   struct virgl_transfer *current = args->current;
   struct virgl_transfer *queued = args->queued;

   u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
   current->offset = current->base.box.x;

   remove_transfer(queue, args);
   queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
}

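/* List action: flush a queued transfer directly through the winsys
 * transfer_put path and retire it.
 */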
static void transfer_put(struct virgl_transfer_queue *queue,
                         struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_resource *res = virgl_resource(queued->base.resource);

   queue->vs->vws->transfer_put(queue->vs->vws, res->hw_res, &queued->base.box,
                                queued->base.stride, queued->l_stride,
                                queued->offset, queued->base.level);

   remove_transfer(queue, args);
}

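/* List action: encode a queued transfer into the given command buffer and
 * park it on COMPLETED_LIST until it is retired by remove_transfer().
 */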
static void transfer_write(struct virgl_transfer_queue *queue,
                           struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_cmd_buf *buf = args->data;

   // Takes a reference on the HW resource, which is released after
   // the exec buffer command.
   virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);

   list_delinit(&queued->queue_link);
   list_addtail(&queued->queue_link, &queue->lists[COMPLETED_LIST]);
}

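/* Walk the selected list and run iter->action on every transfer that
 * iter->compare matches against iter->current.
 */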
static void compare_and_perform_action(struct virgl_transfer_queue *queue,
                                       struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;
   enum virgl_transfer_queue_lists type = iter->type;

   memset(&args, 0, sizeof(args));
   args.current = iter->current;
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
      if (iter->compare(queued, iter->current)) {
         args.queued = queued;
         iter->action(queue, &args);
      }
   }
}

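/* Walk the selected list and run iter->action on every transfer,
 * unconditionally.
 */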
static void perform_action(struct virgl_transfer_queue *queue,
                           struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;
   enum virgl_transfer_queue_lists type = iter->type;

   memset(&args, 0, sizeof(args));
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
      args.queued = queued;
      iter->action(queue, &args);
   }
}

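/* Append a transfer to PENDING_LIST. If the encoded transfers would no
 * longer fit in the dedicated transfer command buffer, the pending transfers
 * are written out and the buffer is submitted first.
 */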
static void add_internal(struct virgl_transfer_queue *queue,
                         struct virgl_transfer *transfer)
{
   uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
   if (queue->tbuf) {
      if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
         struct list_iteration_args iter;
         struct virgl_winsys *vws = queue->vs->vws;

         memset(&iter, 0, sizeof(iter));
         iter.type = PENDING_LIST;
         iter.action = transfer_write;
         iter.data = queue->tbuf;
         perform_action(queue, &iter);

         vws->submit_cmd(vws, queue->tbuf, NULL);
         queue->num_dwords = 0;
      }
   }

   list_addtail(&transfer->queue_link, &queue->lists[PENDING_LIST]);
   queue->num_dwords += dwords;
}

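/* Initialize the queue. A dedicated transfer command buffer is only created
 * when the host advertises VIRGL_CAP_TRANSFER and the winsys supports
 * encoded transfers.
 */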
void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
                               struct virgl_screen *vs,
                               struct slab_child_pool *pool)
{
   queue->vs = vs;
   queue->pool = pool;
   queue->num_dwords = 0;

   for (uint32_t i = 0; i < MAX_LISTS; i++)
      list_inithead(&queue->lists[i]);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
       vs->vws->supports_encoded_transfers)
      queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
   else
      queue->tbuf = NULL;
}

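/* Flush any remaining pending transfers, retire completed ones and destroy
 * the transfer command buffer.
 */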
void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
{
   struct virgl_winsys *vws = queue->vs->vws;
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));

   iter.action = transfer_put;
   iter.type = PENDING_LIST;
   perform_action(queue, &iter);

   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);

   if (queue->tbuf)
      vws->cmd_buf_destroy(queue->tbuf);

   queue->vs = NULL;
   queue->pool = NULL;
   queue->tbuf = NULL;
   queue->num_dwords = 0;
}

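/* Queue a write transfer at unmap time. A reference to the resource is taken
 * on behalf of the queued transfer; for buffers, intersecting pending
 * transfers are coalesced into this one before it is added.
 */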
int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
                               struct virgl_transfer *transfer)
{
   struct pipe_resource *res, *pres;
   struct list_iteration_args iter;

   pres = NULL;
   res = transfer->base.resource;
   pipe_resource_reference(&pres, res);

   if (res->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.compare = transfers_intersect;
      iter.action = replace_unmapped_transfer;
      iter.type = PENDING_LIST;
      compare_and_perform_action(queue, &iter);
   }

   add_internal(queue, transfer);
   return 0;
}

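/* Drain the queue: with encoded transfers, pending transfers are written at
 * the head of the given command buffer (cbuf->cdw is rewound and later
 * restored); otherwise they go through transfer_put. Completed transfers are
 * then retired.
 */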
int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.type = PENDING_LIST;
   if (queue->tbuf) {
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);
   queue->num_dwords = 0;

   return 0;
}

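/* Return true if any transfer still tracked on PENDING_LIST or
 * COMPLETED_LIST overlaps the given transfer.
 */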
bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
                                    struct virgl_transfer *transfer)
{
   bool queued = false;
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.current = transfer;
   iter.compare = transfers_overlap;
   iter.action = set_true;
   iter.data = &queued;

   iter.type = PENDING_LIST;
   compare_and_perform_action(queue, &iter);

   iter.type = COMPLETED_LIST;
   compare_and_perform_action(queue, &iter);

   return queued;
}