2 * Copyright 2018 Chromium.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 #include "util/u_box.h"
25 #include "util/u_inlines.h"
27 #include "virgl_protocol.h"
28 #include "virgl_context.h"
29 #include "virgl_screen.h"
30 #include "virgl_encode.h"
31 #include "virgl_resource.h"
32 #include "virgl_transfer_queue.h"
/* Argument bundle handed to a list_action_t callback while iterating a
 * transfer list.
 *
 * NOTE(review): the "data" member is reconstructed from its uses elsewhere
 * in this file (args->data feeding set_queued / transfer_write) — confirm
 * ordering and presence against upstream. */
struct list_action_args {
   void *data;                      /* action-specific payload */
   struct virgl_transfer *queued;   /* transfer currently on the list */
   struct virgl_transfer *current;  /* transfer being compared/merged in */
};
/* Predicate deciding whether "queued" (already on a list) matches
 * "current" (the transfer being processed). */
typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);
/* Action applied to each matching transfer during list iteration. */
typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);
47 struct list_iteration_args
51 compare_transfers_t compare
;
52 struct virgl_transfer
*current
;
53 enum virgl_transfer_queue_lists type
;
57 transfer_dim(const struct virgl_transfer
*xfer
)
59 switch (xfer
->base
.resource
->target
) {
64 case PIPE_TEXTURE_RECT
:
72 box_min_max(const struct pipe_box
*box
, int dim
, int *min
, int *max
)
78 *max
= box
->x
+ box
->width
;
81 *min
= box
->x
+ box
->width
;
85 if (box
->height
> 0) {
87 *max
= box
->y
+ box
->height
;
90 *min
= box
->y
+ box
->height
;
96 *max
= box
->z
+ box
->depth
;
99 *min
= box
->z
+ box
->depth
;
106 transfer_overlap(const struct virgl_transfer
*xfer
,
107 const struct virgl_hw_res
*hw_res
,
109 const struct pipe_box
*box
,
110 bool include_touching
)
112 const int dim_count
= transfer_dim(xfer
);
114 if (xfer
->hw_res
!= hw_res
|| xfer
->base
.level
!= level
)
117 for (int dim
= 0; dim
< dim_count
; dim
++) {
123 box_min_max(&xfer
->base
.box
, dim
, &xfer_min
, &xfer_max
);
124 box_min_max(box
, dim
, &box_min
, &box_max
);
126 if (include_touching
) {
127 /* touching is considered overlapping */
128 if (xfer_min
> box_max
|| xfer_max
< box_min
)
131 /* touching is not considered overlapping */
132 if (xfer_min
>= box_max
|| xfer_max
<= box_min
)
140 static struct virgl_transfer
*
141 virgl_transfer_queue_find_pending(const struct virgl_transfer_queue
*queue
,
142 const struct virgl_hw_res
*hw_res
,
144 const struct pipe_box
*box
,
145 bool include_touching
)
147 struct virgl_transfer
*xfer
;
148 LIST_FOR_EACH_ENTRY(xfer
, &queue
->lists
[PENDING_LIST
], queue_link
) {
149 if (transfer_overlap(xfer
, hw_res
, level
, box
, include_touching
))
156 static bool transfers_intersect(struct virgl_transfer
*queued
,
157 struct virgl_transfer
*current
)
159 return transfer_overlap(queued
, current
->hw_res
, current
->base
.level
,
160 ¤t
->base
.box
, true);
163 static void set_queued(UNUSED
struct virgl_transfer_queue
*queue
,
164 struct list_action_args
*args
)
166 struct virgl_transfer
*queued
= args
->queued
;
167 struct virgl_transfer
**val
= args
->data
;
171 static void remove_transfer(struct virgl_transfer_queue
*queue
,
172 struct list_action_args
*args
)
174 struct virgl_transfer
*queued
= args
->queued
;
175 list_del(&queued
->queue_link
);
176 virgl_resource_destroy_transfer(queue
->vctx
, queued
);
179 static void replace_unmapped_transfer(struct virgl_transfer_queue
*queue
,
180 struct list_action_args
*args
)
182 struct virgl_transfer
*current
= args
->current
;
183 struct virgl_transfer
*queued
= args
->queued
;
185 u_box_union_2d(¤t
->base
.box
, ¤t
->base
.box
, &queued
->base
.box
);
186 current
->offset
= current
->base
.box
.x
;
188 remove_transfer(queue
, args
);
189 queue
->num_dwords
-= (VIRGL_TRANSFER3D_SIZE
+ 1);
192 static void transfer_put(struct virgl_transfer_queue
*queue
,
193 struct list_action_args
*args
)
195 struct virgl_transfer
*queued
= args
->queued
;
197 queue
->vs
->vws
->transfer_put(queue
->vs
->vws
, queued
->hw_res
,
199 queued
->base
.stride
, queued
->l_stride
,
200 queued
->offset
, queued
->base
.level
);
202 remove_transfer(queue
, args
);
205 static void transfer_write(struct virgl_transfer_queue
*queue
,
206 struct list_action_args
*args
)
208 struct virgl_transfer
*queued
= args
->queued
;
209 struct virgl_cmd_buf
*buf
= args
->data
;
211 // Takes a reference on the HW resource, which is released after
212 // the exec buffer command.
213 virgl_encode_transfer(queue
->vs
, buf
, queued
, VIRGL_TRANSFER_TO_HOST
);
215 list_delinit(&queued
->queue_link
);
216 list_addtail(&queued
->queue_link
, &queue
->lists
[COMPLETED_LIST
]);
219 static void compare_and_perform_action(struct virgl_transfer_queue
*queue
,
220 struct list_iteration_args
*iter
)
222 struct list_action_args args
;
223 struct virgl_transfer
*queued
, *tmp
;
224 enum virgl_transfer_queue_lists type
= iter
->type
;
226 memset(&args
, 0, sizeof(args
));
227 args
.current
= iter
->current
;
228 args
.data
= iter
->data
;
230 LIST_FOR_EACH_ENTRY_SAFE(queued
, tmp
, &queue
->lists
[type
], queue_link
) {
231 if (iter
->compare(queued
, iter
->current
)) {
232 args
.queued
= queued
;
233 iter
->action(queue
, &args
);
238 static void intersect_and_set_queued_once(struct virgl_transfer_queue
*queue
,
239 struct list_iteration_args
*iter
)
241 struct list_action_args args
;
242 struct virgl_transfer
*queued
, *tmp
;
243 enum virgl_transfer_queue_lists type
= iter
->type
;
245 memset(&args
, 0, sizeof(args
));
246 args
.current
= iter
->current
;
247 args
.data
= iter
->data
;
249 LIST_FOR_EACH_ENTRY_SAFE(queued
, tmp
, &queue
->lists
[type
], queue_link
) {
250 if (transfers_intersect(queued
, iter
->current
)) {
251 args
.queued
= queued
;
252 set_queued(queue
, &args
);
258 static void perform_action(struct virgl_transfer_queue
*queue
,
259 struct list_iteration_args
*iter
)
261 struct list_action_args args
;
262 struct virgl_transfer
*queued
, *tmp
;
263 enum virgl_transfer_queue_lists type
= iter
->type
;
265 memset(&args
, 0, sizeof(args
));
266 args
.data
= iter
->data
;
268 LIST_FOR_EACH_ENTRY_SAFE(queued
, tmp
, &queue
->lists
[type
], queue_link
) {
269 args
.queued
= queued
;
270 iter
->action(queue
, &args
);
274 static void add_internal(struct virgl_transfer_queue
*queue
,
275 struct virgl_transfer
*transfer
)
277 uint32_t dwords
= VIRGL_TRANSFER3D_SIZE
+ 1;
279 if (queue
->num_dwords
+ dwords
>= VIRGL_MAX_TBUF_DWORDS
) {
280 struct list_iteration_args iter
;
281 struct virgl_winsys
*vws
= queue
->vs
->vws
;
283 memset(&iter
, 0, sizeof(iter
));
284 iter
.type
= PENDING_LIST
;
285 iter
.action
= transfer_write
;
286 iter
.data
= queue
->tbuf
;
287 perform_action(queue
, &iter
);
289 vws
->submit_cmd(vws
, queue
->tbuf
, NULL
);
290 queue
->num_dwords
= 0;
294 list_addtail(&transfer
->queue_link
, &queue
->lists
[PENDING_LIST
]);
295 queue
->num_dwords
+= dwords
;
299 void virgl_transfer_queue_init(struct virgl_transfer_queue
*queue
,
300 struct virgl_context
*vctx
)
302 struct virgl_screen
*vs
= virgl_screen(vctx
->base
.screen
);
306 queue
->num_dwords
= 0;
308 for (uint32_t i
= 0; i
< MAX_LISTS
; i
++)
309 list_inithead(&queue
->lists
[i
]);
311 if ((vs
->caps
.caps
.v2
.capability_bits
& VIRGL_CAP_TRANSFER
) &&
312 vs
->vws
->supports_encoded_transfers
)
313 queue
->tbuf
= vs
->vws
->cmd_buf_create(vs
->vws
, VIRGL_MAX_TBUF_DWORDS
);
318 void virgl_transfer_queue_fini(struct virgl_transfer_queue
*queue
)
320 struct virgl_winsys
*vws
= queue
->vs
->vws
;
321 struct list_iteration_args iter
;
323 memset(&iter
, 0, sizeof(iter
));
325 iter
.action
= transfer_put
;
326 iter
.type
= PENDING_LIST
;
327 perform_action(queue
, &iter
);
329 iter
.action
= remove_transfer
;
330 iter
.type
= COMPLETED_LIST
;
331 perform_action(queue
, &iter
);
334 vws
->cmd_buf_destroy(queue
->tbuf
);
339 queue
->num_dwords
= 0;
342 int virgl_transfer_queue_unmap(struct virgl_transfer_queue
*queue
,
343 struct virgl_transfer
*transfer
)
345 struct list_iteration_args iter
;
347 /* We don't support copy transfers in the transfer queue. */
348 assert(!transfer
->copy_src_hw_res
);
350 /* Attempt to merge multiple intersecting transfers into a single one. */
351 if (transfer
->base
.resource
->target
== PIPE_BUFFER
) {
352 memset(&iter
, 0, sizeof(iter
));
353 iter
.current
= transfer
;
354 iter
.compare
= transfers_intersect
;
355 iter
.action
= replace_unmapped_transfer
;
356 iter
.type
= PENDING_LIST
;
357 compare_and_perform_action(queue
, &iter
);
360 add_internal(queue
, transfer
);
364 int virgl_transfer_queue_clear(struct virgl_transfer_queue
*queue
,
365 struct virgl_cmd_buf
*cbuf
)
367 struct list_iteration_args iter
;
369 memset(&iter
, 0, sizeof(iter
));
370 iter
.type
= PENDING_LIST
;
372 uint32_t prior_num_dwords
= cbuf
->cdw
;
375 iter
.action
= transfer_write
;
377 perform_action(queue
, &iter
);
379 virgl_encode_end_transfers(cbuf
);
380 cbuf
->cdw
= prior_num_dwords
;
382 iter
.action
= transfer_put
;
383 perform_action(queue
, &iter
);
386 iter
.action
= remove_transfer
;
387 iter
.type
= COMPLETED_LIST
;
388 perform_action(queue
, &iter
);
389 queue
->num_dwords
= 0;
394 bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue
*queue
,
395 struct virgl_transfer
*transfer
)
397 return virgl_transfer_queue_find_pending(queue
,
399 transfer
->base
.level
,
404 struct virgl_transfer
*
405 virgl_transfer_queue_extend(struct virgl_transfer_queue
*queue
,
406 struct virgl_transfer
*transfer
)
408 struct virgl_transfer
*queued
= NULL
;
409 struct list_iteration_args iter
;
411 /* We don't support extending from copy transfers. */
412 assert(!transfer
->copy_src_hw_res
);
414 if (transfer
->base
.resource
->target
== PIPE_BUFFER
) {
415 memset(&iter
, 0, sizeof(iter
));
416 iter
.current
= transfer
;
418 iter
.type
= PENDING_LIST
;
419 intersect_and_set_queued_once(queue
, &iter
);
423 u_box_union_2d(&queued
->base
.box
, &queued
->base
.box
, &transfer
->base
.box
);
424 queued
->offset
= queued
->base
.box
.x
;