radv/winsys: consolidate request->fence code
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>

#include "amdgpu_id.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"

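/* CPU-side state for one command stream. With use_ib_bos, packets are
 * written straight into a GPU-visible IB buffer that can be chained into
 * the next one; otherwise they accumulate in a malloc'ed array that the
 * sysmem submit path copies into a fresh BO at submission time. */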
struct radv_amdgpu_cs {
	struct radeon_winsys_cs base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	amdgpu_bo_handle *handles;
	uint8_t *priorities;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	bool failed;
	bool is_chained;

	int buffer_hash_table[1024];
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
	return (struct radv_amdgpu_cs*)base;
}

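/* Translate a submitted amdgpu_cs_request into the amdgpu_cs_fence that
 * identifies it, so every submit path below can share one helper (the
 * "request->fence" consolidation this change is about). */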
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
                                         struct amdgpu_cs_fence *fence,
                                         struct amdgpu_cs_request *req)
{
	fence->context = ctx->ctx;
	fence->ip_type = req->ip_type;
	fence->ip_instance = req->ip_instance;
	fence->ring = req->ring;
	fence->fence = req->seq_no;
}

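/* Winsys fence objects are plain heap-allocated amdgpu_cs_fence structs;
 * they are filled in by radv_amdgpu_request_to_fence() at submit time. */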
static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
{
	struct amdgpu_cs_fence *fence = calloc(1, sizeof(struct amdgpu_cs_fence));
	return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	free(fence);
}

static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
                                   struct radeon_winsys_fence *_fence,
                                   bool absolute,
                                   uint64_t timeout)
{
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	/* Query the fence status through libdrm. */
	r = amdgpu_cs_query_fence_status(fence,
	                                 timeout,
	                                 flags,
	                                 &expired);

	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	return expired != 0;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	free(cs->old_ib_buffers);
	free(cs->handles);
	free(cs->priorities);
	free(cs);
}

static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
                                enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	return true;
}

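/* Create a command stream. In IB mode the initial IB is 80 KiB (20K dwords)
 * of CPU-visible GTT memory, and max_dw reserves four trailing dwords so a
 * chain packet always fits; in sysmem mode recording starts in a 16 KiB
 * malloc'ed buffer instead. */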
static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
                      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, ring_type);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
		                                  RADEON_DOMAIN_GTT,
		                                  RADEON_FLAG_CPU_ACCESS);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
	} else {
		cs->base.buf = malloc(16384);
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
		cs->base.max_dw = 4096;
	}

	return &cs->base;
}

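/* Ensure at least min_size more dwords fit in the command stream. Without IB
 * BOs this is a bounded realloc. With IB BOs the current IB is padded with
 * NOPs, a new IB buffer is allocated, and an INDIRECT_BUFFER packet chaining
 * into it is emitted; ib_size_ptr points at the previous chain packet's size
 * field so it can be patched once this IB's final size is known. */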
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->failed) {
		cs->base.cdw = 0;
		return;
	}

	if (!cs->ws->use_ib_bos) {
		const uint64_t limit_dws = 0xffff8;
		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
		                       MIN2(cs->base.max_dw * 2, limit_dws));

		/* The total ib size cannot exceed limit_dws dwords. */
		if (ib_dws > limit_dws) {
			cs->failed = true;
			cs->base.cdw = 0;
			return;
		}

		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
		if (new_buf) {
			cs->base.buf = new_buf;
			cs->base.max_dw = ib_dws;
		} else {
			cs->failed = true;
			cs->base.cdw = 0;
		}
		return;
	}

	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	/* Pad with NOPs so the 4-dword chain packet ends on an 8-dword boundary. */
	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		cs->base.buf[cs->base.cdw++] = 0xffff1000;

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
		                             cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
	                                           RADEON_DOMAIN_GTT,
	                                           RADEON_FLAG_CPU_ACCESS);

	if (!cs->ib_buffer) {
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
		return;
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
		return;
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}

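/* Finish recording: in IB mode, pad the IB with NOPs to an 8-dword multiple
 * and store the final size through ib_size_ptr. Returns false if any grow
 * operation failed along the way. */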
static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			cs->base.buf[cs->base.cdw++] = 0xffff1000;

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return !cs->failed;
}

static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->failed = false;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	}
}

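/* Look up a BO in the stream's buffer list. The hash table is a direct-mapped
 * cache over the linear handles array: a hit answers immediately, a miss
 * falls back to a linear scan and refreshes the cached slot. */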
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      amdgpu_bo_handle bo)
{
	unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index] == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i] == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               amdgpu_bo_handle bo,
                                               uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1) {
		cs->priorities[index] = MAX2(cs->priorities[index], priority);
		return;
	}

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
		cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers] = bo;
	cs->priorities[cs->num_buffers] = priority;

	hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
                                      struct radeon_winsys_bo *_bo,
                                      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

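/* Record a secondary command stream into a primary one: the child's buffer
 * list is merged into the parent's, then the child is either called via an
 * INDIRECT_BUFFER packet (IB mode) or copied inline (sysmem mode). */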
static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
                                             struct radeon_winsys_cs *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
		                                   child->priorities[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
		parent->base.buf[parent->base.cdw++] = child->ib.size;
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}

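/* Build the amdgpu BO list for a submission. With debug_all_bos every buffer
 * the winsys knows about is included; with a single CS and no extra BO the
 * stream's own list is used directly; otherwise the per-CS lists are merged
 * and deduplicated, each duplicate keeping its highest priority. */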
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
                                      struct radeon_winsys_cs **cs_array,
                                      unsigned count,
                                      struct radv_amdgpu_winsys_bo *extra_bo,
                                      amdgpu_bo_list_handle *bo_list)
{
	int r;
	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		amdgpu_bo_handle *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num++] = bo->bo;
		}

		r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
		                          handles, NULL,
		                          bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !extra_bo) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
		r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
		                          cs->priorities, bo_list);
	} else {
		unsigned total_buffer_count = !!extra_bo;
		unsigned unique_bo_count = !!extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			total_buffer_count += cs->num_buffers;
		}

		amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
		uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
		if (!handles || !priorities) {
			free(handles);
			free(priorities);
			return -ENOMEM;
		}

		if (extra_bo) {
			handles[0] = extra_bo->bo;
			priorities[0] = 8;
		}

		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_count; ++k) {
					if (handles[k] == cs->handles[j]) {
						found = true;
						priorities[k] = MAX2(priorities[k],
						                     cs->priorities[j]);
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					priorities[unique_bo_count] = cs->priorities[j];
					++unique_bo_count;
				}
			}
		}
		r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
		                          priorities, bo_list);

		free(handles);
		free(priorities);
	}

	return r;
}

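/* Submit an array of command streams as a single IB by chaining each stream
 * into the next with an INDIRECT_BUFFER packet, so only cs_array[0] is handed
 * to the kernel. The chain packets are (re)written on every submit, and the
 * extra four dwords are accounted for in each IB's size field. */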
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                                struct radeon_winsys_cs **cs_array,
                                                unsigned cs_count,
                                                struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request = {0};

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, &bo_list);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
		return r;
	}

	request.ip_type = AMDGPU_HW_IP_GFX;
	request.number_of_ibs = 1;
	request.ibs = &cs0->ib;
	request.resources = bo_list;

	r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
			        "see dmesg for more information.\n");
	}

	amdgpu_bo_list_destroy(bo_list);

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	ctx->last_seq_no = request.seq_no;

	return r;
}

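/* Submit without chaining: the streams are passed as separate IBs, in groups
 * of at most AMDGPU_CS_MAX_IBS_PER_SUBMIT per kernel submission. Any chain
 * packet left over from a previous chained submit is stripped before the
 * ib_info is copied into the request. */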
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                                 struct radeon_winsys_cs **cs_array,
                                                 unsigned cs_count,
                                                 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
		struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
		unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT, cs_count - i);

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		request.ip_type = AMDGPU_HW_IP_GFX;
		request.resources = bo_list;
		request.number_of_ibs = cnt;
		request.ibs = ibs;

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);

			/* Strip a stale chain packet before copying the ib_info,
			 * so the copied size does not include it. */
			if (cs->is_chained) {
				*cs->ib_size_ptr -= 4;
				cs->is_chained = false;
			}

			ibs[j] = cs->ib;
		}

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
				        "see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	ctx->last_seq_no = request.seq_no;

	return 0;
}

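/* Submit streams recorded in system memory: pack as many streams as fit under
 * the IB size limit into one freshly allocated GTT buffer, pad it to an
 * 8-dword multiple (SI needs a different NOP value), and submit that copy as
 * a single IB. */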
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                               struct radeon_winsys_cs **cs_array,
                                               unsigned cs_count,
                                               struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct amdgpu_cs_fence *fence = (struct amdgpu_cs_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	uint32_t pad_word = 0xffff1000U;

	if (radv_amdgpu_winsys(ws)->family == FAMILY_SI)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info ib = {0};
		struct radeon_winsys_bo *bo = NULL;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;

		while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
			size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
			++cnt;
		}

		assert(cnt);

		/* Leave room for up to 8 dwords of alignment padding appended below. */
		bo = ws->buffer_create(ws, 4 * (size + 8), 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
		ptr = ws->buffer_map(bo);

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
			ptr += cs->base.cdw;
		}

		while (!size || (size & 7)) {
			*ptr++ = pad_word;
			++size;
		}

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
		                               (struct radv_amdgpu_winsys_bo*)bo, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		ib.size = size;
		ib.ib_mc_address = ws->buffer_get_va(bo);

		request.ip_type = AMDGPU_HW_IP_GFX;
		request.resources = bo_list;
		request.number_of_ibs = 1;
		request.ibs = &ib;

		r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
				        "see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		ws->buffer_destroy(bo);
		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);
	ctx->last_seq_no = request.seq_no;
	return 0;
}

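/* Pick a submission strategy: system-memory upload when IB BOs are disabled,
 * otherwise grouped IBs. The chained path would only pay off when patching is
 * allowed and the count exceeds AMDGPU_CS_MAX_IBS_PER_SUBMIT; the trailing
 * "&& false" appears to keep it deliberately disabled for now. */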
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                        struct radeon_winsys_cs **cs_array,
                                        unsigned cs_count,
                                        bool can_patch,
                                        struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
	if (!cs->ws->use_ib_bos) {
		return radv_amdgpu_winsys_cs_submit_sysmem(_ctx, cs_array,
		                                           cs_count, _fence);
	} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && false) {
		return radv_amdgpu_winsys_cs_submit_chained(_ctx, cs_array,
		                                            cs_count, _fence);
	} else {
		return radv_amdgpu_winsys_cs_submit_fallback(_ctx, cs_array,
		                                             cs_count, _fence);
	}
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	int r;

	if (!ctx)
		return NULL;
	r = amdgpu_cs_ctx_create(ws->dev, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
		goto error_create;
	}
	ctx->ws = ws;
	return (struct radeon_winsys_ctx *)ctx;
error_create:
	FREE(ctx);
	return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;

	if (ctx->last_seq_no) {
		uint32_t expired;
		struct amdgpu_cs_fence fence;

		fence.context = ctx->ctx;
		fence.ip_type = AMDGPU_HW_IP_GFX;
		fence.ip_instance = 0;
		fence.ring = 0;
		fence.fence = ctx->last_seq_no;

		int ret = amdgpu_cs_query_fence_status(&fence, 1000000000ull, 0,
		                                       &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

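/* Hook all CS and fence entry points into the winsys function table. */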
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
}