virgl: improve virgl_transfer_queue_is_queued
[mesa.git] / src / gallium / drivers / virgl / virgl_transfer_queue.c
/*
 * Copyright 2018 Chromium.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/u_box.h"
#include "util/u_inlines.h"

#include "virgl_protocol.h"
#include "virgl_context.h"
#include "virgl_screen.h"
#include "virgl_encode.h"
#include "virgl_resource.h"
#include "virgl_transfer_queue.h"

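/*
 * The transfer queue tracks write transfers on two lists.  Transfers on
 * PENDING_LIST still have to be flushed to the host.  Transfers on
 * COMPLETED_LIST have already been encoded into a command buffer and are
 * kept around until their hw_res references can be dropped after
 * submission.
 */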
struct list_action_args
{
   void *data;
   struct virgl_transfer *queued;
   struct virgl_transfer *current;
};

typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);

struct list_iteration_args
{
   void *data;
   list_action_t action;
   compare_transfers_t compare;
   struct virgl_transfer *current;
   enum virgl_transfer_queue_lists type;
};

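/*
 * Number of dimensions that matter for overlap tests on this transfer's
 * resource: buffers and 1D textures vary only along x, 2D textures along
 * x/y, everything else along x/y/z.
 */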
static int
transfer_dim(const struct virgl_transfer *xfer)
{
   switch (xfer->base.resource->target) {
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
      return 1;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      return 2;
   default:
      return 3;
   }
}

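/*
 * Return the [min, max) extent of a pipe_box along the given dimension
 * (0 = x, 1 = y, 2 = z), normalizing boxes with negative sizes.  For
 * example, a box with x = 10 and width = -4 yields min = 6, max = 10.
 */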
static void
box_min_max(const struct pipe_box *box, int dim, int *min, int *max)
{
   switch (dim) {
   case 0:
      if (box->width > 0) {
         *min = box->x;
         *max = box->x + box->width;
      } else {
         *max = box->x;
         *min = box->x + box->width;
      }
      break;
   case 1:
      if (box->height > 0) {
         *min = box->y;
         *max = box->y + box->height;
      } else {
         *max = box->y;
         *min = box->y + box->height;
      }
      break;
   default:
      if (box->depth > 0) {
         *min = box->z;
         *max = box->z + box->depth;
      } else {
         *max = box->z;
         *min = box->z + box->depth;
      }
      break;
   }
}

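/*
 * Check whether a transfer overlaps a region of a resource.  They overlap
 * only when they target the same hw_res and level and their boxes
 * intersect along every meaningful dimension.  With include_touching set,
 * boxes that merely share a boundary (e.g. [0, 4) and [4, 8) along x) also
 * count as overlapping.
 */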
static bool
transfer_overlap(const struct virgl_transfer *xfer,
                 const struct virgl_hw_res *hw_res,
                 unsigned level,
                 const struct pipe_box *box,
                 bool include_touching)
{
   const int dim_count = transfer_dim(xfer);

   if (xfer->hw_res != hw_res || xfer->base.level != level)
      return false;

   for (int dim = 0; dim < dim_count; dim++) {
      int xfer_min;
      int xfer_max;
      int box_min;
      int box_max;

      box_min_max(&xfer->base.box, dim, &xfer_min, &xfer_max);
      box_min_max(box, dim, &box_min, &box_max);

      if (include_touching) {
         /* touching is considered overlapping */
         if (xfer_min > box_max || xfer_max < box_min)
            return false;
      } else {
         /* touching is not considered overlapping */
         if (xfer_min >= box_max || xfer_max <= box_min)
            return false;
      }
   }

   return true;
}

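/*
 * Scan PENDING_LIST for a transfer that overlaps the given region of a
 * resource.  Returns the first overlapping transfer, or NULL if there is
 * none.
 */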
static struct virgl_transfer *
virgl_transfer_queue_find_pending(const struct virgl_transfer_queue *queue,
                                  const struct virgl_hw_res *hw_res,
                                  unsigned level,
                                  const struct pipe_box *box,
                                  bool include_touching)
{
   struct virgl_transfer *xfer;
   LIST_FOR_EACH_ENTRY(xfer, &queue->lists[PENDING_LIST], queue_link) {
      if (transfer_overlap(xfer, hw_res, level, box, include_touching))
         return xfer;
   }

   return NULL;
}

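/* Compare callback: the queued transfer intersects or touches the current
 * one.  Touching counts here so that adjacent buffer uploads can be merged
 * into a single transfer.
 */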
static bool transfers_intersect(struct virgl_transfer *queued,
                                struct virgl_transfer *current)
{
   return transfer_overlap(queued, current->hw_res, current->base.level,
                           &current->base.box, true);
}

static void set_queued(UNUSED struct virgl_transfer_queue *queue,
                       struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_transfer **val = args->data;
   *val = queued;
}

static void remove_transfer(struct virgl_transfer_queue *queue,
                            struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   list_del(&queued->queue_link);
   virgl_resource_destroy_transfer(queue->vctx, queued);
}

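/*
 * Merge a queued transfer into the current one and drop the queued copy.
 * The current box grows to the union of both boxes; merging buffer ranges
 * [0, 4) and [2, 8), for example, yields [0, 8).  The dword budget
 * reserved for the queued transfer is returned to the queue.
 */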
static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
                                      struct list_action_args *args)
{
   struct virgl_transfer *current = args->current;
   struct virgl_transfer *queued = args->queued;

   u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
   current->offset = current->base.box.x;

   remove_transfer(queue, args);
   queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
}

static void transfer_put(struct virgl_transfer_queue *queue,
                         struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;

   queue->vs->vws->transfer_put(queue->vs->vws, queued->hw_res,
                                &queued->base.box,
                                queued->base.stride, queued->l_stride,
                                queued->offset, queued->base.level);

   remove_transfer(queue, args);
}

static void transfer_write(struct virgl_transfer_queue *queue,
                           struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_cmd_buf *buf = args->data;

   /* Takes a reference on the HW resource, which is released after
    * the exec buffer command.
    */
   virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);

   list_delinit(&queued->queue_link);
   list_addtail(&queued->queue_link, &queue->lists[COMPLETED_LIST]);
}

static void compare_and_perform_action(struct virgl_transfer_queue *queue,
                                       struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;
   enum virgl_transfer_queue_lists type = iter->type;

   memset(&args, 0, sizeof(args));
   args.current = iter->current;
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
      if (iter->compare(queued, iter->current)) {
         args.queued = queued;
         iter->action(queue, &args);
      }
   }
}

static void intersect_and_set_queued_once(struct virgl_transfer_queue *queue,
                                          struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;
   enum virgl_transfer_queue_lists type = iter->type;

   memset(&args, 0, sizeof(args));
   args.current = iter->current;
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
      if (transfers_intersect(queued, iter->current)) {
         args.queued = queued;
         set_queued(queue, &args);
         return;
      }
   }
}

static void perform_action(struct virgl_transfer_queue *queue,
                           struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;
   enum virgl_transfer_queue_lists type = iter->type;

   memset(&args, 0, sizeof(args));
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
      args.queued = queued;
      iter->action(queue, &args);
   }
}

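/*
 * Queue a transfer on PENDING_LIST.  When encoded transfers are in use and
 * the extra VIRGL_TRANSFER3D_SIZE + 1 dwords would exceed the transfer
 * command buffer, the pending transfers are encoded and submitted first to
 * make room.
 */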
static void add_internal(struct virgl_transfer_queue *queue,
                         struct virgl_transfer *transfer)
{
   uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
   if (queue->tbuf) {
      if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
         struct list_iteration_args iter;
         struct virgl_winsys *vws = queue->vs->vws;

         memset(&iter, 0, sizeof(iter));
         iter.type = PENDING_LIST;
         iter.action = transfer_write;
         iter.data = queue->tbuf;
         perform_action(queue, &iter);

         vws->submit_cmd(vws, queue->tbuf, NULL);
         queue->num_dwords = 0;
      }
   }

   list_addtail(&transfer->queue_link, &queue->lists[PENDING_LIST]);
   queue->num_dwords += dwords;
}


void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
                               struct virgl_context *vctx)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   queue->vs = vs;
   queue->vctx = vctx;
   queue->num_dwords = 0;

   for (uint32_t i = 0; i < MAX_LISTS; i++)
      list_inithead(&queue->lists[i]);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
       vs->vws->supports_encoded_transfers)
      queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
   else
      queue->tbuf = NULL;
}

void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
{
   struct virgl_winsys *vws = queue->vs->vws;
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));

   iter.action = transfer_put;
   iter.type = PENDING_LIST;
   perform_action(queue, &iter);

   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);

   if (queue->tbuf)
      vws->cmd_buf_destroy(queue->tbuf);

   queue->vs = NULL;
   queue->vctx = NULL;
   queue->tbuf = NULL;
   queue->num_dwords = 0;
}

int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
                               struct virgl_transfer *transfer)
{
   struct list_iteration_args iter;

   /* We don't support copy transfers in the transfer queue. */
   assert(!transfer->copy_src_hw_res);

   /* Attempt to merge multiple intersecting transfers into a single one. */
   if (transfer->base.resource->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.compare = transfers_intersect;
      iter.action = replace_unmapped_transfer;
      iter.type = PENDING_LIST;
      compare_and_perform_action(queue, &iter);
   }

   add_internal(queue, transfer);
   return 0;
}

int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.type = PENDING_LIST;
   if (queue->tbuf) {
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);
   queue->num_dwords = 0;

   return 0;
}

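/*
 * Report whether any pending transfer strictly overlaps the given
 * transfer's region.  Unlike the merging paths, touching boxes do not
 * count as queued here.
 */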
bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
                                    struct virgl_transfer *transfer)
{
   return virgl_transfer_queue_find_pending(queue,
                                            transfer->hw_res,
                                            transfer->base.level,
                                            &transfer->base.box,
                                            false);
}

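/*
 * Try to extend an intersecting queued buffer transfer to also cover the
 * current transfer's range, instead of queueing a second transfer.  On
 * success the queued transfer, with its box grown to the union of both
 * boxes, is returned; otherwise NULL is returned.
 */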
struct virgl_transfer *
virgl_transfer_queue_extend(struct virgl_transfer_queue *queue,
                            struct virgl_transfer *transfer)
{
   struct virgl_transfer *queued = NULL;
   struct list_iteration_args iter;

   /* We don't support extending from copy transfers. */
   assert(!transfer->copy_src_hw_res);

   if (transfer->base.resource->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.data = &queued;
      iter.type = PENDING_LIST;
      intersect_and_set_queued_once(queue, &iter);
   }

   if (queued) {
      u_box_union_2d(&queued->base.box, &queued->base.box, &transfer->base.box);
      queued->offset = queued->base.box.x;
   }

   return queued;
}