virgl: Introduce virgl_resource_transfer_map
[mesa.git] / src / gallium / drivers / virgl / virgl_transfer_queue.c
1 /*
2 * Copyright 2018 Chromium.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "util/u_box.h"
25 #include "util/u_inlines.h"
26
27 #include "virgl_protocol.h"
28 #include "virgl_context.h"
29 #include "virgl_screen.h"
30 #include "virgl_encode.h"
31 #include "virgl_resource.h"
32 #include "virgl_transfer_queue.h"
33
/* Arguments handed to a list_action_t while walking a transfer list. */
struct list_action_args
{
   /* Action-specific payload: a bool out-flag (set_true), a
    * struct virgl_transfer ** out-slot (set_queued), or a cmd buf
    * (transfer_write), depending on the action. */
   void *data;
   /* The transfer already stored in the queue list being visited. */
   struct virgl_transfer *queued;
   /* The caller's transfer being compared/merged against, if any. */
   struct virgl_transfer *current;
};
40
/* Predicate deciding whether a queued transfer matches the current one. */
typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

/* Callback invoked for a list entry during iteration. */
typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);
46
/* Parameters for one iteration over a transfer queue list. */
struct list_iteration_args
{
   /* Forwarded into list_action_args::data for each visited entry. */
   void *data;
   /* Action applied to entries (all entries, or only matching ones). */
   list_action_t action;
   /* Optional predicate gating the action (compare_and_perform_action). */
   compare_transfers_t compare;
   /* The caller's transfer, forwarded as list_action_args::current. */
   struct virgl_transfer *current;
   /* Which queue list to walk (PENDING_LIST or COMPLETED_LIST). */
   enum virgl_transfer_queue_lists type;
};
55
56 static bool transfers_intersect(struct virgl_transfer *queued,
57 struct virgl_transfer *current)
58 {
59 boolean tmp;
60
61 if (queued->hw_res != current->hw_res)
62 return false;
63
64 tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
65 return (tmp == TRUE);
66 }
67
68 static bool transfers_overlap(struct virgl_transfer *queued,
69 struct virgl_transfer *current)
70 {
71 boolean tmp;
72
73 if (queued->hw_res != current->hw_res)
74 return false;
75
76 if (queued->base.level != current->base.level)
77 return false;
78
79 if (queued->base.box.z != current->base.box.z)
80 return true;
81
82 if (queued->base.box.depth != 1 || current->base.box.depth != 1)
83 return true;
84
85 /*
86 * Special case for boxes with [x: 0, width: 1] and [x: 1, width: 1].
87 */
88 if (queued->base.resource->target == PIPE_BUFFER) {
89 if (queued->base.box.x + queued->base.box.width == current->base.box.x)
90 return false;
91
92 if (current->base.box.x + current->base.box.width == queued->base.box.x)
93 return false;
94 }
95
96 tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
97 return (tmp == TRUE);
98 }
99
100 static void set_true(UNUSED struct virgl_transfer_queue *queue,
101 struct list_action_args *args)
102 {
103 bool *val = args->data;
104 *val = true;
105 }
106
107 static void set_queued(UNUSED struct virgl_transfer_queue *queue,
108 struct list_action_args *args)
109 {
110 struct virgl_transfer *queued = args->queued;
111 struct virgl_transfer **val = args->data;
112 *val = queued;
113 }
114
115 static void remove_transfer(struct virgl_transfer_queue *queue,
116 struct list_action_args *args)
117 {
118 struct virgl_transfer *queued = args->queued;
119 list_del(&queued->queue_link);
120 virgl_resource_destroy_transfer(queue->vctx, queued);
121 }
122
123 static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
124 struct list_action_args *args)
125 {
126 struct virgl_transfer *current = args->current;
127 struct virgl_transfer *queued = args->queued;
128
129 u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
130 current->offset = current->base.box.x;
131
132 remove_transfer(queue, args);
133 queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
134 }
135
136 static void transfer_put(struct virgl_transfer_queue *queue,
137 struct list_action_args *args)
138 {
139 struct virgl_transfer *queued = args->queued;
140
141 queue->vs->vws->transfer_put(queue->vs->vws, queued->hw_res,
142 &queued->base.box,
143 queued->base.stride, queued->l_stride,
144 queued->offset, queued->base.level);
145
146 remove_transfer(queue, args);
147 }
148
149 static void transfer_write(struct virgl_transfer_queue *queue,
150 struct list_action_args *args)
151 {
152 struct virgl_transfer *queued = args->queued;
153 struct virgl_cmd_buf *buf = args->data;
154
155 // Takes a reference on the HW resource, which is released after
156 // the exec buffer command.
157 virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);
158
159 list_delinit(&queued->queue_link);
160 list_addtail(&queued->queue_link, &queue->lists[COMPLETED_LIST]);
161 }
162
163 static void compare_and_perform_action(struct virgl_transfer_queue *queue,
164 struct list_iteration_args *iter)
165 {
166 struct list_action_args args;
167 struct virgl_transfer *queued, *tmp;
168 enum virgl_transfer_queue_lists type = iter->type;
169
170 memset(&args, 0, sizeof(args));
171 args.current = iter->current;
172 args.data = iter->data;
173
174 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
175 if (iter->compare(queued, iter->current)) {
176 args.queued = queued;
177 iter->action(queue, &args);
178 }
179 }
180 }
181
182 static void intersect_and_set_queued_once(struct virgl_transfer_queue *queue,
183 struct list_iteration_args *iter)
184 {
185 struct list_action_args args;
186 struct virgl_transfer *queued, *tmp;
187 enum virgl_transfer_queue_lists type = iter->type;
188
189 memset(&args, 0, sizeof(args));
190 args.current = iter->current;
191 args.data = iter->data;
192
193 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
194 if (transfers_intersect(queued, iter->current)) {
195 args.queued = queued;
196 set_queued(queue, &args);
197 return;
198 }
199 }
200 }
201
202 static void perform_action(struct virgl_transfer_queue *queue,
203 struct list_iteration_args *iter)
204 {
205 struct list_action_args args;
206 struct virgl_transfer *queued, *tmp;
207 enum virgl_transfer_queue_lists type = iter->type;
208
209 memset(&args, 0, sizeof(args));
210 args.data = iter->data;
211
212 LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
213 args.queued = queued;
214 iter->action(queue, &args);
215 }
216 }
217
218 static void add_internal(struct virgl_transfer_queue *queue,
219 struct virgl_transfer *transfer)
220 {
221 uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
222 if (queue->tbuf) {
223 if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
224 struct list_iteration_args iter;
225 struct virgl_winsys *vws = queue->vs->vws;
226
227 memset(&iter, 0, sizeof(iter));
228 iter.type = PENDING_LIST;
229 iter.action = transfer_write;
230 iter.data = queue->tbuf;
231 perform_action(queue, &iter);
232
233 vws->submit_cmd(vws, queue->tbuf, NULL);
234 queue->num_dwords = 0;
235 }
236 }
237
238 list_addtail(&transfer->queue_link, &queue->lists[PENDING_LIST]);
239 queue->num_dwords += dwords;
240 }
241
242
243 void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
244 struct virgl_context *vctx)
245 {
246 struct virgl_screen *vs = virgl_screen(vctx->base.screen);
247
248 queue->vs = vs;
249 queue->vctx = vctx;
250 queue->num_dwords = 0;
251
252 for (uint32_t i = 0; i < MAX_LISTS; i++)
253 list_inithead(&queue->lists[i]);
254
255 if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
256 vs->vws->supports_encoded_transfers)
257 queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
258 else
259 queue->tbuf = NULL;
260 }
261
262 void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
263 {
264 struct virgl_winsys *vws = queue->vs->vws;
265 struct list_iteration_args iter;
266
267 memset(&iter, 0, sizeof(iter));
268
269 iter.action = transfer_put;
270 iter.type = PENDING_LIST;
271 perform_action(queue, &iter);
272
273 iter.action = remove_transfer;
274 iter.type = COMPLETED_LIST;
275 perform_action(queue, &iter);
276
277 if (queue->tbuf)
278 vws->cmd_buf_destroy(queue->tbuf);
279
280 queue->vs = NULL;
281 queue->vctx = NULL;
282 queue->tbuf = NULL;
283 queue->num_dwords = 0;
284 }
285
286 int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
287 struct virgl_transfer *transfer)
288 {
289 struct list_iteration_args iter;
290
291 /* We don't support copy transfers in the transfer queue. */
292 assert(!transfer->copy_src_hw_res);
293
294 /* Attempt to merge multiple intersecting transfers into a single one. */
295 if (transfer->base.resource->target == PIPE_BUFFER) {
296 memset(&iter, 0, sizeof(iter));
297 iter.current = transfer;
298 iter.compare = transfers_intersect;
299 iter.action = replace_unmapped_transfer;
300 iter.type = PENDING_LIST;
301 compare_and_perform_action(queue, &iter);
302 }
303
304 add_internal(queue, transfer);
305 return 0;
306 }
307
/* Flushes all pending transfers into cbuf (encoded path) or directly
 * through the winsys, then frees all completed transfers.  Returns 0. */
int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.type = PENDING_LIST;
   if (queue->tbuf) {
      /* Temporarily rewind cbuf->cdw to 0 so the transfer commands are
       * encoded at the start of the buffer, then restore the caller's
       * dword count.  NOTE(review): this appears to rely on the caller
       * having reserved the region below prior_num_dwords for these
       * transfer commands — confirm against the context flush path. */
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      /* No encoded-transfer support: issue each transfer via the winsys. */
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   /* Free the transfers that transfer_write moved to the completed list
    * (and any left over from earlier flushes). */
   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);
   queue->num_dwords = 0;

   return 0;
}
337
338 bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
339 struct virgl_transfer *transfer)
340 {
341 bool queued = false;
342 struct list_iteration_args iter;
343
344 memset(&iter, 0, sizeof(iter));
345 iter.current = transfer;
346 iter.compare = transfers_overlap;
347 iter.action = set_true;
348 iter.data = &queued;
349
350 iter.type = PENDING_LIST;
351 compare_and_perform_action(queue, &iter);
352
353 iter.type = COMPLETED_LIST;
354 compare_and_perform_action(queue, &iter);
355
356 return queued;
357 }
358
359 struct virgl_transfer *
360 virgl_transfer_queue_extend(struct virgl_transfer_queue *queue,
361 struct virgl_transfer *transfer)
362 {
363 struct virgl_transfer *queued = NULL;
364 struct list_iteration_args iter;
365
366 /* We don't support extending from copy transfers. */
367 assert(!transfer->copy_src_hw_res);
368
369 if (transfer->base.resource->target == PIPE_BUFFER) {
370 memset(&iter, 0, sizeof(iter));
371 iter.current = transfer;
372 iter.data = &queued;
373 iter.type = PENDING_LIST;
374 intersect_and_set_queued_once(queue, &iter);
375 }
376
377 if (queued) {
378 u_box_union_2d(&queued->base.box, &queued->base.box, &transfer->base.box);
379 queued->offset = queued->base.box.x;
380 }
381
382 return queued;
383 }