mesa.git: src/gallium/drivers/virgl/virgl_transfer_queue.c
/*
 * Copyright 2018 Chromium.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/u_box.h"
#include "util/u_inlines.h"

#include "virgl_protocol.h"
#include "virgl_context.h"
#include "virgl_screen.h"
#include "virgl_encode.h"
#include "virgl_resource.h"
#include "virgl_transfer_queue.h"

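/* Queued transfers are processed through small callbacks: an iteration
 * walks the transfer list and invokes a list_action_t on each entry,
 * optionally restricted to entries matched by a compare_transfers_t
 * callback.
 */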
struct list_action_args
{
   void *data;
   struct virgl_transfer *queued;
   struct virgl_transfer *current;
};

typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);

struct list_iteration_args
{
   void *data;
   list_action_t action;
   compare_transfers_t compare;
   struct virgl_transfer *current;
};

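/* Number of box dimensions that are meaningful for the transfer's resource
 * target: 1 for buffers and 1D textures, 2 for 2D and rect textures, and 3
 * for everything else.
 */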
static int
transfer_dim(const struct virgl_transfer *xfer)
{
   switch (xfer->base.resource->target) {
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
      return 1;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      return 2;
   default:
      return 3;
   }
}

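/* Compute the [min, max] extent of a box along one dimension, normalizing
 * boxes with a negative width/height/depth.
 */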
static void
box_min_max(const struct pipe_box *box, int dim, int *min, int *max)
{
   switch (dim) {
   case 0:
      if (box->width > 0) {
         *min = box->x;
         *max = box->x + box->width;
      } else {
         *max = box->x;
         *min = box->x + box->width;
      }
      break;
   case 1:
      if (box->height > 0) {
         *min = box->y;
         *max = box->y + box->height;
      } else {
         *max = box->y;
         *min = box->y + box->height;
      }
      break;
   default:
      if (box->depth > 0) {
         *min = box->z;
         *max = box->z + box->depth;
      } else {
         *max = box->z;
         *min = box->z + box->depth;
      }
      break;
   }
}

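/* Check whether a queued transfer hits the given (hw_res, level, box)
 * region.  When include_touching is set, regions that merely share an edge
 * also count as overlapping.
 */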
static bool
transfer_overlap(const struct virgl_transfer *xfer,
                 const struct virgl_hw_res *hw_res,
                 unsigned level,
                 const struct pipe_box *box,
                 bool include_touching)
{
   const int dim_count = transfer_dim(xfer);

   if (xfer->hw_res != hw_res || xfer->base.level != level)
      return false;

   for (int dim = 0; dim < dim_count; dim++) {
      int xfer_min;
      int xfer_max;
      int box_min;
      int box_max;

      box_min_max(&xfer->base.box, dim, &xfer_min, &xfer_max);
      box_min_max(box, dim, &box_min, &box_max);

      if (include_touching) {
         /* touching is considered overlapping */
         if (xfer_min > box_max || xfer_max < box_min)
            return false;
      } else {
         /* touching is not considered overlapping */
         if (xfer_min >= box_max || xfer_max <= box_min)
            return false;
      }
   }

   return true;
}

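/* Return the first queued transfer overlapping the given region, or NULL
 * when there is none.
 */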
static struct virgl_transfer *
virgl_transfer_queue_find_overlap(const struct virgl_transfer_queue *queue,
                                  const struct virgl_hw_res *hw_res,
                                  unsigned level,
                                  const struct pipe_box *box,
                                  bool include_touching)
{
   struct virgl_transfer *xfer;
   LIST_FOR_EACH_ENTRY(xfer, &queue->transfer_list, queue_link) {
      if (transfer_overlap(xfer, hw_res, level, box, include_touching))
         return xfer;
   }

   return NULL;
}

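/* compare_transfers_t callback: true when the queued transfer touches or
 * overlaps the current one.
 */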
static bool transfers_intersect(struct virgl_transfer *queued,
                                struct virgl_transfer *current)
{
   return transfer_overlap(queued, current->hw_res, current->base.level,
                           &current->base.box, true);
}

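/* Unlink a transfer from the queue and destroy it. */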
static void remove_transfer(struct virgl_transfer_queue *queue,
                            struct virgl_transfer *queued)
{
   list_del(&queued->queue_link);
   virgl_resource_destroy_transfer(queue->vctx, queued);
}

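/* list_action_t callback used on unmap: fold the queued transfer's box
 * into the current transfer's box and drop the queued entry, reclaiming
 * the dwords it would have needed in the transfer buffer.
 */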
static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
                                      struct list_action_args *args)
{
   struct virgl_transfer *current = args->current;
   struct virgl_transfer *queued = args->queued;

   u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
   current->offset = current->base.box.x;

   remove_transfer(queue, queued);
   queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
}

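/* list_action_t callback: flush the queued transfer directly through the
 * winsys instead of encoding it into a command buffer.
 */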
static void transfer_put(struct virgl_transfer_queue *queue,
                         struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;

   queue->vs->vws->transfer_put(queue->vs->vws, queued->hw_res,
                                &queued->base.box,
                                queued->base.stride, queued->l_stride,
                                queued->offset, queued->base.level);

   remove_transfer(queue, queued);
}

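/* list_action_t callback: encode the queued transfer into the command
 * buffer passed through args->data.
 */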
static void transfer_write(struct virgl_transfer_queue *queue,
                           struct list_action_args *args)
{
   struct virgl_transfer *queued = args->queued;
   struct virgl_cmd_buf *buf = args->data;

   /* Takes a reference on the HW resource, which is released after
    * the exec buffer command.
    */
   virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);

   remove_transfer(queue, queued);
}

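/* Run iter->action on every queued transfer matched by iter->compare.  The
 * safe iterator allows actions to remove entries while walking the list.
 */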
static void compare_and_perform_action(struct virgl_transfer_queue *queue,
                                       struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;

   memset(&args, 0, sizeof(args));
   args.current = iter->current;
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->transfer_list, queue_link) {
      if (iter->compare(queued, iter->current)) {
         args.queued = queued;
         iter->action(queue, &args);
      }
   }
}

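/* Run iter->action unconditionally on every queued transfer. */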
static void perform_action(struct virgl_transfer_queue *queue,
                           struct list_iteration_args *iter)
{
   struct list_action_args args;
   struct virgl_transfer *queued, *tmp;

   memset(&args, 0, sizeof(args));
   args.data = iter->data;

   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->transfer_list, queue_link) {
      args.queued = queued;
      iter->action(queue, &args);
   }
}

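/* Append a transfer to the queue.  If a dedicated transfer buffer is in
 * use and this entry would overflow it, the already-queued transfers are
 * encoded and submitted first.  Each transfer costs VIRGL_TRANSFER3D_SIZE
 * payload dwords plus one command header dword.
 */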
static void add_internal(struct virgl_transfer_queue *queue,
                         struct virgl_transfer *transfer)
{
   uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
   if (queue->tbuf) {
      if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
         struct list_iteration_args iter;
         struct virgl_winsys *vws = queue->vs->vws;

         memset(&iter, 0, sizeof(iter));
         iter.action = transfer_write;
         iter.data = queue->tbuf;
         perform_action(queue, &iter);

         vws->submit_cmd(vws, queue->tbuf, NULL);
         queue->num_dwords = 0;
      }
   }

   list_addtail(&transfer->queue_link, &queue->transfer_list);
   queue->num_dwords += dwords;
}

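/* Initialize the queue.  A dedicated transfer command buffer is created
 * only when both the host (VIRGL_CAP_TRANSFER) and the winsys support
 * encoded transfers.
 */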
void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
                               struct virgl_context *vctx)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   queue->vs = vs;
   queue->vctx = vctx;
   queue->num_dwords = 0;

   list_inithead(&queue->transfer_list);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
       vs->vws->supports_encoded_transfers)
      queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
   else
      queue->tbuf = NULL;
}

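/* Flush all still-queued transfers through the winsys and release the
 * queue's resources.
 */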
void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
{
   struct virgl_winsys *vws = queue->vs->vws;
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));

   iter.action = transfer_put;
   perform_action(queue, &iter);

   if (queue->tbuf)
      vws->cmd_buf_destroy(queue->tbuf);

   queue->vs = NULL;
   queue->vctx = NULL;
   queue->tbuf = NULL;
   queue->num_dwords = 0;
}

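/* Queue a transfer at unmap time.  For buffers, any queued transfers that
 * touch or intersect the new one are first merged into it, so a single
 * entry covers the combined region.
 */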
int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
                               struct virgl_transfer *transfer)
{
   struct list_iteration_args iter;

   /* We don't support copy transfers in the transfer queue. */
   assert(!transfer->copy_src_hw_res);

   /* Attempt to merge multiple intersecting transfers into a single one. */
   if (transfer->base.resource->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.compare = transfers_intersect;
      iter.action = replace_unmapped_transfer;
      compare_and_perform_action(queue, &iter);
   }

   add_internal(queue, transfer);
   return 0;
}

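/* Drain all queued transfers into cbuf.  With a dedicated transfer buffer,
 * cbuf->cdw is rewound to 0 so the transfers land at the head of cbuf
 * (space the caller is assumed to have reserved) and are terminated with
 * an end-transfers marker; otherwise fall back to direct winsys puts.
 */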
int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   if (queue->tbuf) {
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   queue->num_dwords = 0;

   return 0;
}

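/* Return true when a queued transfer strictly overlaps (not merely
 * touches) the given transfer's region.
 */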
bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
                                    struct virgl_transfer *transfer)
{
   return virgl_transfer_queue_find_overlap(queue,
                                            transfer->hw_res,
                                            transfer->base.level,
                                            &transfer->base.box,
                                            false);
}

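/* Try to extend a queued buffer transfer in place: when a queued transfer
 * touches or overlaps [offset, offset + size), copy the new data into its
 * mapped hw resource and grow its box.  Returns false when no such queued
 * transfer exists.
 */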
bool
virgl_transfer_queue_extend_buffer(struct virgl_transfer_queue *queue,
                                   const struct virgl_hw_res *hw_res,
                                   unsigned offset, unsigned size,
                                   const void *data)
{
   struct virgl_transfer *queued;
   struct pipe_box box;

   u_box_1d(offset, size, &box);
   queued = virgl_transfer_queue_find_overlap(queue, hw_res, 0, &box, true);
   if (!queued)
      return false;

   assert(queued->base.resource->target == PIPE_BUFFER);
   assert(queued->hw_res_map);

   memcpy(queued->hw_res_map + offset, data, size);
   u_box_union_2d(&queued->base.box, &queued->base.box, &box);
   queued->offset = queued->base.box.x;

   return true;
}