virgl: init transfer queue from virgl_context
[mesa.git] / src / gallium / drivers / virgl / virgl_transfer_queue.c
1 /*
2 * Copyright 2018 Chromium.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "util/u_box.h"
25 #include "util/u_inlines.h"
26
27 #include "virgl_protocol.h"
28 #include "virgl_context.h"
29 #include "virgl_screen.h"
30 #include "virgl_encode.h"
31 #include "virgl_resource.h"
32 #include "virgl_transfer_queue.h"
33
/* Context handed to a list_action_t for one queued transfer. */
struct list_action_args
{
   void *data;                     /* action-specific payload; meaning depends on the action */
   struct virgl_transfer *queued;  /* the transfer already on a queue list */
   struct virgl_transfer *current; /* the transfer being processed now (may be NULL) */
};

/* Predicate relating a queued transfer to the current one (e.g. intersect/overlap). */
typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

/* Action applied to a queued transfer while walking a queue list. */
typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);

/* Parameters for one iteration pass over a queue list. */
struct list_iteration_args
{
   void *data;                     /* forwarded to the action via args->data */
   list_action_t action;           /* applied to each (matching) entry */
   compare_transfers_t compare;    /* optional filter; only some iterators use it */
   struct virgl_transfer *current; /* transfer compared against list entries */
   enum virgl_transfer_queue_lists type; /* which list to walk (PENDING/COMPLETED) */
};
55
56 static bool transfers_intersect(struct virgl_transfer *queued,
57 struct virgl_transfer *current)
58 {
59 boolean tmp;
60 struct pipe_resource *queued_res = queued->base.resource;
61 struct pipe_resource *current_res = current->base.resource;
62
63 if (queued_res != current_res)
64 return false;
65
66 tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
67 return (tmp == TRUE);
68 }
69
70 static bool transfers_overlap(struct virgl_transfer *queued,
71 struct virgl_transfer *current)
72 {
73 boolean tmp;
74 struct pipe_resource *queued_res = queued->base.resource;
75 struct pipe_resource *current_res = current->base.resource;
76
77 if (queued_res != current_res)
78 return false;
79
80 if (queued->base.level != current->base.level)
81 return false;
82
83 if (queued->base.box.z != current->base.box.z)
84 return true;
85
86 if (queued->base.box.depth != 1 || current->base.box.depth != 1)
87 return true;
88
89 /*
90 * Special case for boxes with [x: 0, width: 1] and [x: 1, width: 1].
91 */
92 if (queued_res->target == PIPE_BUFFER) {
93 if (queued->base.box.x + queued->base.box.width == current->base.box.x)
94 return false;
95
96 if (current->base.box.x + current->base.box.width == queued->base.box.x)
97 return false;
98 }
99
100 tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
101 return (tmp == TRUE);
102 }
103
104 static void set_true(UNUSED struct virgl_transfer_queue *queue,
105 struct list_action_args *args)
106 {
107 bool *val = args->data;
108 *val = true;
109 }
110
111 static void set_queued(UNUSED struct virgl_transfer_queue *queue,
112 struct list_action_args *args)
113 {
114 struct virgl_transfer *queued = args->queued;
115 struct virgl_transfer **val = args->data;
116 *val = queued;
117 }
118
/* Unlink a queued transfer, release the resource reference that was taken
 * when it entered the queue (see virgl_transfer_queue_unmap), and return
 * the transfer object to the context's pool.
 */
static void remove_transfer(struct virgl_transfer_queue *queue,
                            struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct pipe_resource *pres = queued->base.resource;
   list_del(&queued->queue_link);
   pipe_resource_reference(&pres, NULL);
   virgl_resource_destroy_transfer(&queue->vctx->transfer_pool, queued);
}
128
/* Merge a queued transfer into the current (not-yet-queued) one: grow the
 * current box to cover both regions, then drop the queued entry along with
 * the command-stream dwords it had reserved.
 */
static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
                                      struct list_action_args *args)
{
   struct virgl_transfer *current = args->current;
   struct virgl_transfer *queued = args->queued;

   /* Extend the current box so it also covers the queued region. */
   u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
   current->offset = current->base.box.x;

   remove_transfer(queue, args);
   /* The merged-away transfer no longer needs encoding space. */
   queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
}
141
/* Flush a queued transfer to the host through the winsys (the direct,
 * non-encoded path), then remove it from its list.
 */
static void transfer_put(struct virgl_transfer_queue *queue,
                         struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_resource *res = virgl_resource(queued->base.resource);

   queue->vs->vws->transfer_put(queue->vs->vws, res->hw_res, &queued->base.box,
                                queued->base.stride, queued->l_stride,
                                queued->offset, queued->base.level);

   remove_transfer(queue, args);
}
154
/* Encode a queued transfer into the command buffer passed via args->data,
 * and move it from its current list onto the completed list.
 */
static void transfer_write(struct virgl_transfer_queue *queue,
                           struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_cmd_buf *buf = args->data;

   // Takes a reference on the HW resource, which is released after
   // the exec buffer command.
   virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);

   list_delinit(&queued->queue_link);
   list_addtail(&queued->queue_link, &queue->lists[COMPLETED_LIST]);
}
168
169 static void compare_and_perform_action(struct virgl_transfer_queue *queue,
170 struct list_iteration_args *iter)
171 {
172 struct list_action_args args;
173 struct virgl_transfer *queued, *tmp;
174 enum virgl_transfer_queue_lists type = iter->type;
175
176 memset(&args, 0, sizeof(args));
177 args.current = iter->current;
178 args.data = iter->data;
179
180 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
181 if (iter->compare(queued, iter->current)) {
182 args.queued = queued;
183 iter->action(queue, &args);
184 }
185 }
186 }
187
188 static void intersect_and_set_queued_once(struct virgl_transfer_queue *queue,
189 struct list_iteration_args *iter)
190 {
191 struct list_action_args args;
192 struct virgl_transfer *queued, *tmp;
193 enum virgl_transfer_queue_lists type = iter->type;
194
195 memset(&args, 0, sizeof(args));
196 args.current = iter->current;
197 args.data = iter->data;
198
199 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
200 if (transfers_intersect(queued, iter->current)) {
201 args.queued = queued;
202 set_queued(queue, &args);
203 return;
204 }
205 }
206 }
207
208 static void perform_action(struct virgl_transfer_queue *queue,
209 struct list_iteration_args *iter)
210 {
211 struct list_action_args args;
212 struct virgl_transfer *queued, *tmp;
213 enum virgl_transfer_queue_lists type = iter->type;
214
215 memset(&args, 0, sizeof(args));
216 args.data = iter->data;
217
218 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
219 args.queued = queued;
220 iter->action(queue, &args);
221 }
222 }
223
/* Append a transfer to the pending list.
 *
 * When a dedicated transfer command buffer exists and this transfer would
 * push the encoded size past VIRGL_MAX_TBUF_DWORDS, first encode and
 * submit all currently pending transfers to make room.
 */
static void add_internal(struct virgl_transfer_queue *queue,
                         struct virgl_transfer *transfer)
{
   uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
   if (queue->tbuf) {
      if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
         struct list_iteration_args iter;
         struct virgl_winsys *vws = queue->vs->vws;

         /* Encode every pending transfer into tbuf (entries move to the
          * completed list; see transfer_write)...
          */
         memset(&iter, 0, sizeof(iter));
         iter.type = PENDING_LIST;
         iter.action = transfer_write;
         iter.data = queue->tbuf;
         perform_action(queue, &iter);

         /* ...then submit and reset the dword budget. */
         vws->submit_cmd(vws, queue->tbuf, NULL);
         queue->num_dwords = 0;
      }
   }

   list_addtail(&transfer->queue_link, &queue->lists[PENDING_LIST]);
   queue->num_dwords += dwords;
}
247
248
249 void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
250 struct virgl_context *vctx)
251 {
252 struct virgl_screen *vs = virgl_screen(vctx->base.screen);
253
254 queue->vs = vs;
255 queue->vctx = vctx;
256 queue->num_dwords = 0;
257
258 for (uint32_t i = 0; i < MAX_LISTS; i++)
259 list_inithead(&queue->lists[i]);
260
261 if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
262 vs->vws->supports_encoded_transfers)
263 queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
264 else
265 queue->tbuf = NULL;
266 }
267
/* Tear down the queue: flush still-pending transfers via direct winsys
 * puts, release completed entries, destroy the transfer command buffer,
 * and clear all fields.
 */
void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
{
   struct virgl_winsys *vws = queue->vs->vws;
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));

   /* Pending transfers still carry data the host has not seen; put them. */
   iter.action = transfer_put;
   iter.type = PENDING_LIST;
   perform_action(queue, &iter);

   /* Completed transfers are already encoded; just release them. */
   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);

   if (queue->tbuf)
      vws->cmd_buf_destroy(queue->tbuf);

   queue->vs = NULL;
   queue->vctx = NULL;
   queue->tbuf = NULL;
   queue->num_dwords = 0;
}
291
/* Queue a transfer at unmap time instead of submitting it immediately.
 *
 * Takes a reference on the transfer's resource so it outlives its stay in
 * the queue; the reference is released by remove_transfer() when the
 * transfer eventually leaves the queue.  Returns 0 (no failure paths).
 */
int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
                               struct virgl_transfer *transfer)
{
   struct pipe_resource *res, *pres;
   struct list_iteration_args iter;

   pres = NULL;
   res = transfer->base.resource;
   pipe_resource_reference(&pres, res);

   /* We don't support copy transfers in the transfer queue. */
   assert(!transfer->copy_src_res);

   /* Attempt to merge multiple intersecting transfers into a single one. */
   if (res->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.compare = transfers_intersect;
      iter.action = replace_unmapped_transfer;
      iter.type = PENDING_LIST;
      compare_and_perform_action(queue, &iter);
   }

   add_internal(queue, transfer);
   return 0;
}
318
/* Flush all pending transfers.
 *
 * With a dedicated transfer buffer, encode the pending transfers into the
 * caller's command buffer; otherwise fall back to direct winsys puts.
 * Always drains the completed list and resets the dword budget.
 * Returns 0 (no failure paths).
 */
int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.type = PENDING_LIST;
   if (queue->tbuf) {
      /* Temporarily rewind cbuf->cdw so the transfer commands land at the
       * start of the buffer, then restore the caller's write position.
       * NOTE(review): presumably the caller reserved that region for the
       * transfer commands — confirm against virgl_context.
       */
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      /* No encoded-transfer support: issue direct puts instead. */
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   /* transfer_write moved entries to COMPLETED_LIST; release them now. */
   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);
   queue->num_dwords = 0;

   return 0;
}
348
349 bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
350 struct virgl_transfer *transfer)
351 {
352 bool queued = false;
353 struct list_iteration_args iter;
354
355 memset(&iter, 0, sizeof(iter));
356 iter.current = transfer;
357 iter.compare = transfers_overlap;
358 iter.action = set_true;
359 iter.data = &queued;
360
361 iter.type = PENDING_LIST;
362 compare_and_perform_action(queue, &iter);
363
364 iter.type = COMPLETED_LIST;
365 compare_and_perform_action(queue, &iter);
366
367 return queued;
368 }
369
370 struct virgl_transfer *
371 virgl_transfer_queue_extend(struct virgl_transfer_queue *queue,
372 struct virgl_transfer *transfer)
373 {
374 struct virgl_transfer *queued = NULL;
375 struct list_iteration_args iter;
376
377 /* We don't support extending from copy transfers. */
378 assert(!transfer->copy_src_res);
379
380 if (transfer->base.resource->target == PIPE_BUFFER) {
381 memset(&iter, 0, sizeof(iter));
382 iter.current = transfer;
383 iter.data = &queued;
384 iter.type = PENDING_LIST;
385 intersect_and_set_queued_once(queue, &iter);
386 }
387
388 if (queued) {
389 u_box_union_2d(&queued->base.box, &queued->base.box, &transfer->base.box);
390 queued->offset = queued->base.box.x;
391 }
392
393 return queued;
394 }