radv/winsys: Trivial style and readability fixups
[mesa.git] / src / amd / vulkan / winsys / amdgpu / radv_amdgpu_cs.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>

#include "amdgpu_id.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"

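/* Command stream implementation on top of libdrm amdgpu. A CS either
 * records into a CPU-visible IB buffer (use_ib_bos) that is chained to
 * further IBs on overflow, or into plain malloc'ed memory that is copied
 * into a fresh GTT buffer at submit time. The buffer list uses a small
 * open hash table for fast duplicate detection.
 */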
struct radv_amdgpu_cs {
	struct radeon_winsys_cs base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	amdgpu_bo_handle *handles;
	uint8_t *priorities;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	bool failed;
	bool is_chained;

	int buffer_hash_table[1024];
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
	return (struct radv_amdgpu_cs*)base;
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
{
	struct amdgpu_cs_fence *fence = calloc(1, sizeof(struct amdgpu_cs_fence));
	return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	free(fence);
}

static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
				   struct radeon_winsys_fence *_fence,
				   bool absolute,
				   uint64_t timeout)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	/* Now use the libdrm query. */
	r = amdgpu_cs_query_fence_status(fence,
					 timeout,
					 flags,
					 &expired);

	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	return expired != 0;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	free(cs->old_ib_buffers);
	free(cs->handles);
	free(cs->priorities);
	free(cs);
}

static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
				enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	return true;
}

static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
		      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, RING_GFX);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
						  RADEON_DOMAIN_GTT,
						  RADEON_FLAG_CPU_ACCESS);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
	} else {
		cs->base.buf = malloc(16384);
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
		cs->base.max_dw = 4096;
	}

	return &cs->base;
}

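/* Grow the CS. With IB BOs, pad the current IB to the alignment required
 * for chaining, allocate a new IB buffer and emit an INDIRECT_BUFFER
 * packet that chains the old IB to the new one; the chain packet's size
 * dword is filled in later through ib_size_ptr. Without IB BOs, simply
 * reallocate the CPU-side buffer.
 */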
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	if (cs->failed) {
		cs->base.cdw = 0;
		return;
	}

	if (!cs->ws->use_ib_bos) {
		uint32_t *new_buf = realloc(cs->base.buf, ib_size);
		if (new_buf) {
			cs->base.buf = new_buf;
			cs->base.max_dw = ib_size / 4;
		} else {
			cs->failed = true;
			cs->base.cdw = 0;
		}
		return;
	}

	/* Pad with NOPs so that the 4-dword chain packet below ends on an
	 * 8-dword boundary. */
	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		cs->base.buf[cs->base.cdw++] = 0xffff1000;

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
					     cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS);

	if (!cs->ib_buffer) {
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

	/* Chain the old IB to the new one; the size dword is patched in by
	 * the next grow or by cs_finalize. */
	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}

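/* Pad the IB to the 8-dword alignment the hardware requires and record
 * its final size. Returns false if the CS failed to grow earlier. */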
static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			cs->base.buf[cs->base.cdw++] = 0xffff1000;

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return !cs->failed;
}

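/* Reset the CS for reuse: clear the hash table entries of the tracked
 * buffers, release any old chained IBs and rewind to the first IB. */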
static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->failed = false;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	}
}

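/* Look up a BO in the buffer list. The hash table caches one index per
 * bucket; on a miss we fall back to a linear scan and refresh the cached
 * entry. Returns -1 if the BO is not in the list. */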
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
				      amdgpu_bo_handle bo)
{
	unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index] == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i] == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

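/* Add a BO to the buffer list, growing the arrays as needed. If it is
 * already tracked, only bump its priority. */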
static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
					       amdgpu_bo_handle bo,
					       uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1) {
		cs->priorities[index] = MAX2(cs->priorities[index], priority);
		return;
	}

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
		cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers] = bo;
	cs->priorities[cs->num_buffers] = priority;

	hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
				      struct radeon_winsys_bo *_bo,
				      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

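/* Execute a secondary CS from a primary one. With IB BOs the secondary
 * is called through an INDIRECT_BUFFER packet; otherwise its contents
 * are copied inline into the primary. Either way, the secondary's buffer
 * list is merged into the primary's. */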
static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
					     struct radeon_winsys_cs *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
						   child->priorities[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
		parent->base.buf[parent->base.cdw++] = child->ib.size;
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}

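/* Build the amdgpu BO list for a submission. Three cases: with
 * debug_all_bos every BO the winsys knows about is included; a single CS
 * without an extra BO can reuse its arrays directly; otherwise the
 * per-CS lists are merged, deduplicating handles and keeping the highest
 * priority for each. */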
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
				      struct radeon_winsys_cs **cs_array,
				      unsigned count,
				      struct radv_amdgpu_winsys_bo *extra_bo,
				      amdgpu_bo_list_handle *bo_list)
{
	int r;
	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		amdgpu_bo_handle *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num++] = bo->bo;
		}

		r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
					  handles, NULL,
					  bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !extra_bo) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
		r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
					  cs->priorities, bo_list);
	} else {
		unsigned total_buffer_count = !!extra_bo;
		unsigned unique_bo_count = !!extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			total_buffer_count += cs->num_buffers;
		}

		amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
		uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
		if (!handles || !priorities) {
			free(handles);
			free(priorities);
			return -ENOMEM;
		}

		if (extra_bo) {
			handles[0] = extra_bo->bo;
			priorities[0] = 8;
		}

		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_count; ++k) {
					if (handles[k] == cs->handles[j]) {
						found = true;
						priorities[k] = MAX2(priorities[k],
								     cs->priorities[j]);
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					priorities[unique_bo_count] = cs->priorities[j];
					++unique_bo_count;
				}
			}
		}
		r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
					  priorities, bo_list);

		free(handles);
		free(priorities);
	}

	return r;
}

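/* Submit by chaining all the IBs together with INDIRECT_BUFFER packets
 * and handing only the first IB to the kernel. Relies on each CS having
 * reserved 4 spare dwords for the chain packet (max_dw is sized
 * accordingly in cs_create/cs_grow). */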
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
						struct radeon_winsys_cs **cs_array,
						unsigned cs_count,
						struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request = {0};

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, &bo_list);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
		return r;
	}

	request.ip_type = AMDGPU_HW_IP_GFX;
	request.number_of_ibs = 1;
	request.ibs = &cs0->ib;
	request.resources = bo_list;

	r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
				"see dmesg for more information.\n");
	}

	amdgpu_bo_list_destroy(bo_list);

	if (fence) {
		fence->context = ctx->ctx;
		fence->ip_type = request.ip_type;
		fence->ip_instance = request.ip_instance;
		fence->ring = request.ring;
		fence->fence = request.seq_no;
	}
	ctx->last_seq_no = request.seq_no;

	return r;
}

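/* Submit the IBs as-is, batching up to AMDGPU_CS_MAX_IBS_PER_SUBMIT of
 * them per kernel submission. */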
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
						 struct radeon_winsys_cs **cs_array,
						 unsigned cs_count,
						 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
		struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
		unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT, cs_count - i);

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		request.ip_type = AMDGPU_HW_IP_GFX;
		request.resources = bo_list;
		request.number_of_ibs = cnt;
		request.ibs = ibs;

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);

			/* Un-chain before copying, so the submitted size
			 * doesn't include the chain packet. */
			if (cs->is_chained) {
				*cs->ib_size_ptr -= 4;
				cs->is_chained = false;
			}

			ibs[j] = cs->ib;
		}

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		if (r)
			return r;

		i += cnt;
	}
	if (fence) {
		fence->context = ctx->ctx;
		fence->ip_type = request.ip_type;
		fence->ip_instance = request.ip_instance;
		fence->ring = request.ring;
		fence->fence = request.seq_no;
	}
	ctx->last_seq_no = request.seq_no;

	return 0;
}

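/* Submission path for the no-IB-BO case: copy the CPU-side command
 * streams into a freshly allocated GTT buffer, pad it, and submit that
 * buffer as a single IB. */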
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
					       struct radeon_winsys_cs **cs_array,
					       unsigned cs_count,
					       struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	uint32_t pad_word = 0xffff1000U;

	if (radv_amdgpu_winsys(ws)->family == FAMILY_SI)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info ib = {0};
		struct radeon_winsys_bo *bo = NULL;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;

		/* Take as many streams as fit within the IB size limit. */
		while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
			size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
			++cnt;
		}

		assert(cnt);

		/* The extra 32 bytes leave room for padding to an 8-dword
		 * boundary below. */
		bo = ws->buffer_create(ws, 4 * size + 32, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
		ptr = ws->buffer_map(bo);

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
			ptr += cs->base.cdw;
		}

		while (!size || (size & 7)) {
			*ptr++ = pad_word;
			++size;
		}

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
					       (struct radv_amdgpu_winsys_bo*)bo, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		ib.size = size;
		ib.ib_mc_address = ws->buffer_get_va(bo);

		request.ip_type = AMDGPU_HW_IP_GFX;
		request.resources = bo_list;
		request.number_of_ibs = 1;
		request.ibs = &ib;

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		ws->buffer_destroy(bo);
		if (r)
			return r;

		i += cnt;
	}
	if (fence) {
		fence->context = ctx->ctx;
		fence->ip_type = request.ip_type;
		fence->ip_instance = request.ip_instance;
		fence->ring = request.ring;
		fence->fence = request.seq_no;
	}
	ctx->last_seq_no = request.seq_no;

	return 0;
}

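/* Pick a submission path: sysmem copies for the no-IB-BO case, IB
 * chaining when the caller allows patching the CSes, and the plain
 * multi-IB fallback otherwise. */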
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
					struct radeon_winsys_cs **cs_array,
					unsigned cs_count,
					bool can_patch,
					struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
	if (!cs->ws->use_ib_bos) {
		return radv_amdgpu_winsys_cs_submit_sysmem(_ctx, cs_array,
							   cs_count, _fence);
	} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && false) {
		/* The "&& false" keeps the chained path disabled for now. */
		return radv_amdgpu_winsys_cs_submit_chained(_ctx, cs_array,
							    cs_count, _fence);
	} else {
		return radv_amdgpu_winsys_cs_submit_fallback(_ctx, cs_array,
							     cs_count, _fence);
	}
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	int r;

	if (!ctx)
		return NULL;
	r = amdgpu_cs_ctx_create(ws->dev, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
		goto error_create;
	}
	ctx->ws = ws;
	return (struct radeon_winsys_ctx *)ctx;
error_create:
	FREE(ctx);
	return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;

	if (ctx->last_seq_no) {
		uint32_t expired;
		struct amdgpu_cs_fence fence;

		fence.context = ctx->ctx;
		fence.ip_type = AMDGPU_HW_IP_GFX;
		fence.ip_instance = 0;
		fence.ring = 0;
		fence.fence = ctx->last_seq_no;

		int ret = amdgpu_cs_query_fence_status(&fence, 1000000000ull, 0,
						       &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
}